Diffstat (limited to 'chromium/third_party/mesa/src/src')
l--------- chromium/third_party/mesa/src/src/gallium/state_trackers/d3d1x/w32api | 1
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/gallium/state_trackers/dri/drm/dri_context.c | 267
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/gallium/state_trackers/dri/drm/dri_drawable.c | 403
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/gallium/state_trackers/dri/drm/dri_screen.c | 421
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/gallium/state_trackers/dri/sw/dri_context.c | 267
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/gallium/state_trackers/dri/sw/dri_drawable.c | 403
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/gallium/state_trackers/dri/sw/dri_screen.c | 421
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_batchbuffer.c | 544
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_blit.c | 605
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_buffer_objects.c | 850
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_buffers.c | 133
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_context.c | 1031
-rwxr-xr-x [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_extensions.c | 198
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_fbo.c | 965
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_mipmap_tree.c | 1712
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_pixel.c | 168
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_pixel_bitmap.c | 341
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_pixel_copy.c | 231
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_pixel_draw.c | 59
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_pixel_read.c | 206
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_regions.c | 474
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_resolve_map.c | 112
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_screen.c | 1205
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_span.c | 216
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_state.c | 196
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_syncobj.c | 133
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex.c | 223
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_copy.c | 172
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_format.c | 66
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_image.c | 361
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_layout.c | 207
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_subimage.c | 178
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_validate.c | 219
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_batchbuffer.c | 544
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_blit.c | 605
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_buffer_objects.c | 850
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_buffers.c | 133
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_context.c | 1031
-rwxr-xr-x [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_extensions.c | 198
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_fbo.c | 965
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_mipmap_tree.c | 1712
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_pixel.c | 168
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_pixel_bitmap.c | 341
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_pixel_copy.c | 231
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_pixel_draw.c | 59
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_pixel_read.c | 206
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_regions.c | 474
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_resolve_map.c | 112
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_screen.c | 1205
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_span.c | 216
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_state.c | 196
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_syncobj.c | 133
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex.c | 223
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_copy.c | 172
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_format.c | 66
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_image.c | 361
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_layout.c | 207
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_subimage.c | 178
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_validate.c | 219
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_buffer_objects.c | 234
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_buffer_objects.h | 53
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_chipset.h | 42
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_cmdbuf.h | 114
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_common.c | 792
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_common.h | 89
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_common_context.c | 634
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_common_context.h | 531
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_debug.c | 109
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_debug.h | 175
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_dma.c | 512
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_dma.h | 61
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_fbo.c | 981
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_fog.c | 126
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_fog.h | 45
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_mipmap_tree.c | 611
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_mipmap_tree.h | 107
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_pixel_read.c | 222
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_queryobj.c | 218
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_queryobj.h | 56
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_screen.c | 797
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_screen.h | 123
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_span.c | 162
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_span.h | 48
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_tex_copy.c | 161
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_texture.c | 834
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_texture.h | 92
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_tile.c | 513
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_tile.h | 39
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/server/radeon_dri.h | 116
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/server/radeon_macros.h | 129
-rw-r--r-- [l---------] chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/server/radeon_reg.h | 2164
91 files changed, 34592 insertions(+), 91 deletions(-)
diff --git a/chromium/third_party/mesa/src/src/gallium/state_trackers/d3d1x/w32api b/chromium/third_party/mesa/src/src/gallium/state_trackers/d3d1x/w32api
deleted file mode 120000
index e47a1989e10..00000000000
--- a/chromium/third_party/mesa/src/src/gallium/state_trackers/d3d1x/w32api
+++ /dev/null
@@ -1 +0,0 @@
-/usr/include/wine/windows
\ No newline at end of file
diff --git a/chromium/third_party/mesa/src/src/gallium/state_trackers/dri/drm/dri_context.c b/chromium/third_party/mesa/src/src/gallium/state_trackers/dri/drm/dri_context.c
index 5cfbbaeb068..040382698b1 120000..100644
--- a/chromium/third_party/mesa/src/src/gallium/state_trackers/dri/drm/dri_context.c
+++ b/chromium/third_party/mesa/src/src/gallium/state_trackers/dri/drm/dri_context.c
@@ -1 +1,266 @@
-../common/dri_context.c
\ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2009, VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Author: Keith Whitwell <keithw@vmware.com>
+ * Author: Jakob Bornecrantz <wallbraker@gmail.com>
+ */
+
+#include "utils.h"
+
+#include "dri_screen.h"
+#include "dri_drawable.h"
+#include "dri_context.h"
+#include "state_tracker/drm_driver.h"
+
+#include "pipe/p_context.h"
+#include "state_tracker/st_context.h"
+
+static void
+dri_pp_query(struct dri_context *ctx)
+{
+ unsigned int i;
+
+ for (i = 0; i < PP_FILTERS; i++) {
+ ctx->pp_enabled[i] = driQueryOptioni(&ctx->optionCache, pp_filters[i].name);
+ }
+}
+
+static void dri_fill_st_options(struct st_config_options *options,
+ const struct driOptionCache * optionCache)
+{
+ options->force_glsl_extensions_warn =
+ driQueryOptionb(optionCache, "force_glsl_extensions_warn");
+}
+
+GLboolean
+dri_create_context(gl_api api, const struct gl_config * visual,
+ __DRIcontext * cPriv,
+ unsigned major_version,
+ unsigned minor_version,
+ uint32_t flags,
+ unsigned *error,
+ void *sharedContextPrivate)
+{
+ __DRIscreen *sPriv = cPriv->driScreenPriv;
+ struct dri_screen *screen = dri_screen(sPriv);
+ struct st_api *stapi = screen->st_api;
+ struct dri_context *ctx = NULL;
+ struct st_context_iface *st_share = NULL;
+ struct st_context_attribs attribs;
+ enum st_context_error ctx_err = 0;
+
+ memset(&attribs, 0, sizeof(attribs));
+ switch (api) {
+ case API_OPENGLES:
+ attribs.profile = ST_PROFILE_OPENGL_ES1;
+ break;
+ case API_OPENGLES2:
+ attribs.profile = ST_PROFILE_OPENGL_ES2;
+ break;
+ case API_OPENGL:
+ attribs.profile = ST_PROFILE_DEFAULT;
+ attribs.major = major_version;
+ attribs.minor = minor_version;
+
+ if ((flags & __DRI_CTX_FLAG_DEBUG) != 0)
+ attribs.flags |= ST_CONTEXT_FLAG_DEBUG;
+
+ if ((flags & __DRI_CTX_FLAG_FORWARD_COMPATIBLE) != 0)
+ attribs.flags |= ST_CONTEXT_FLAG_FORWARD_COMPATIBLE;
+ break;
+ default:
+ *error = __DRI_CTX_ERROR_BAD_API;
+ goto fail;
+ }
+
+ if (sharedContextPrivate) {
+ st_share = ((struct dri_context *)sharedContextPrivate)->st;
+ }
+
+ ctx = CALLOC_STRUCT(dri_context);
+ if (ctx == NULL) {
+ *error = __DRI_CTX_ERROR_NO_MEMORY;
+ goto fail;
+ }
+
+ cPriv->driverPrivate = ctx;
+ ctx->cPriv = cPriv;
+ ctx->sPriv = sPriv;
+
+ driParseConfigFiles(&ctx->optionCache,
+ &screen->optionCache, sPriv->myNum, driver_descriptor.name);
+
+ dri_fill_st_options(&attribs.options, &ctx->optionCache);
+ dri_fill_st_visual(&attribs.visual, screen, visual);
+ ctx->st = stapi->create_context(stapi, &screen->base, &attribs, &ctx_err,
+ st_share);
+ if (ctx->st == NULL) {
+ switch (ctx_err) {
+ case ST_CONTEXT_SUCCESS:
+ *error = __DRI_CTX_ERROR_SUCCESS;
+ break;
+ case ST_CONTEXT_ERROR_NO_MEMORY:
+ *error = __DRI_CTX_ERROR_NO_MEMORY;
+ break;
+ case ST_CONTEXT_ERROR_BAD_API:
+ *error = __DRI_CTX_ERROR_BAD_API;
+ break;
+ case ST_CONTEXT_ERROR_BAD_VERSION:
+ *error = __DRI_CTX_ERROR_BAD_VERSION;
+ break;
+ case ST_CONTEXT_ERROR_BAD_FLAG:
+ *error = __DRI_CTX_ERROR_BAD_FLAG;
+ break;
+ case ST_CONTEXT_ERROR_UNKNOWN_ATTRIBUTE:
+ *error = __DRI_CTX_ERROR_UNKNOWN_ATTRIBUTE;
+ break;
+ case ST_CONTEXT_ERROR_UNKNOWN_FLAG:
+ *error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
+ break;
+ }
+ goto fail;
+ }
+ ctx->st->st_manager_private = (void *) ctx;
+ ctx->stapi = stapi;
+
+ // Context successfully created. See if post-processing is requested.
+ dri_pp_query(ctx);
+
+ ctx->pp = pp_init(screen->base.screen, ctx->pp_enabled);
+
+ *error = __DRI_CTX_ERROR_SUCCESS;
+ return GL_TRUE;
+
+ fail:
+ if (ctx && ctx->st)
+ ctx->st->destroy(ctx->st);
+
+ FREE(ctx);
+ return GL_FALSE;
+}
+
+void
+dri_destroy_context(__DRIcontext * cPriv)
+{
+ struct dri_context *ctx = dri_context(cPriv);
+
+ /* note: we are freeing values and nothing more because
+ * driParseConfigFiles allocated values only - the rest
+ * is owned by screen optionCache.
+ */
+ FREE(ctx->optionCache.values);
+
+ /* No particular reason to wait for command completion before
+ * destroying a context, but we flush the context here
+ * to avoid having to add code elsewhere to cope with flushing a
+ * partially destroyed context.
+ */
+ ctx->st->flush(ctx->st, 0, NULL);
+ ctx->st->destroy(ctx->st);
+
+ if (ctx->pp) pp_free(ctx->pp);
+
+ FREE(ctx);
+}
+
+GLboolean
+dri_unbind_context(__DRIcontext * cPriv)
+{
+ /* dri_util.c ensures cPriv is not null */
+ struct dri_screen *screen = dri_screen(cPriv->driScreenPriv);
+ struct dri_context *ctx = dri_context(cPriv);
+ struct st_api *stapi = screen->st_api;
+
+ if (--ctx->bind_count == 0) {
+ if (ctx->st == ctx->stapi->get_current(ctx->stapi)) {
+ /* For conformance, unbind is supposed to flush the context.
+ * However, if we do it here we might end up flushing a partially
+ * destroyed context. Instead, we flush in dri_make_current and
+ * in dri_destroy_context which should cover all the cases.
+ */
+ stapi->make_current(stapi, NULL, NULL, NULL);
+ }
+ }
+
+ return GL_TRUE;
+}
+
+GLboolean
+dri_make_current(__DRIcontext * cPriv,
+ __DRIdrawable * driDrawPriv,
+ __DRIdrawable * driReadPriv)
+{
+ /* dri_util.c ensures cPriv is not null */
+ struct dri_context *ctx = dri_context(cPriv);
+ struct dri_drawable *draw = dri_drawable(driDrawPriv);
+ struct dri_drawable *read = dri_drawable(driReadPriv);
+ struct st_context_iface *old_st = ctx->stapi->get_current(ctx->stapi);
+
+ /* Flush the old context here so we don't have to flush on unbind() */
+ if (old_st && old_st != ctx->st)
+ old_st->flush(old_st, ST_FLUSH_FRONT, NULL);
+
+ ++ctx->bind_count;
+
+ if (!driDrawPriv && !driReadPriv)
+ return ctx->stapi->make_current(ctx->stapi, ctx->st, NULL, NULL);
+ else if (!driDrawPriv || !driReadPriv)
+ return GL_FALSE;
+
+ if (ctx->dPriv != driDrawPriv) {
+ ctx->dPriv = driDrawPriv;
+ draw->texture_stamp = driDrawPriv->lastStamp - 1;
+ }
+ if (ctx->rPriv != driReadPriv) {
+ ctx->rPriv = driReadPriv;
+ read->texture_stamp = driReadPriv->lastStamp - 1;
+ }
+
+ ctx->stapi->make_current(ctx->stapi, ctx->st, &draw->base, &read->base);
+
+ // This is ok to call here. If they are already init, it's a no-op.
+ if (draw->textures[ST_ATTACHMENT_BACK_LEFT] && draw->textures[ST_ATTACHMENT_DEPTH_STENCIL]
+ && ctx->pp)
+ pp_init_fbos(ctx->pp, draw->textures[ST_ATTACHMENT_BACK_LEFT]->width0,
+ draw->textures[ST_ATTACHMENT_BACK_LEFT]->height0);
+
+ return GL_TRUE;
+}
+
+struct dri_context *
+dri_get_current(__DRIscreen *sPriv)
+{
+ struct dri_screen *screen = dri_screen(sPriv);
+ struct st_api *stapi = screen->st_api;
+ struct st_context_iface *st;
+
+ st = stapi->get_current(stapi);
+
+ return (struct dri_context *) (st ? st->st_manager_private : NULL);
+}
+
+/* vim: set sw=3 ts=8 sts=3 expandtab: */
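
The flush protocol in the three functions above is easy to lose in the diff:
a context is flushed when a different context is made current
(dri_make_current) or when it is destroyed (dri_destroy_context), and
deliberately not in dri_unbind_context. A minimal, self-contained C sketch of
that invariant (toy names only, not the Mesa API):

#include <stdio.h>

struct toy_ctx { const char *name; int bind_count; };
static struct toy_ctx *cur;

static void flush(struct toy_ctx *c) { printf("flush %s\n", c->name); }

static void toy_make_current(struct toy_ctx *c)
{
   if (cur && cur != c)
      flush(cur);          /* mirrors the old_st->flush() call above */
   if (c)
      c->bind_count++;
   cur = c;
}

static void toy_unbind(struct toy_ctx *c)
{
   if (--c->bind_count == 0 && cur == c)
      cur = NULL;          /* no flush here, by design */
}

static void toy_destroy(struct toy_ctx *c)
{
   flush(c);               /* covers whatever unbind skipped */
}

int main(void)
{
   struct toy_ctx a = { "A", 0 }, b = { "B", 0 };
   toy_make_current(&a);
   toy_make_current(&b);   /* flushes A */
   toy_unbind(&b);
   toy_destroy(&b);        /* flushes B */
   return 0;
}

Routing every flush through make_current and destroy is what lets unbind run
safely on a partially destroyed context, as the comments above note.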
diff --git a/chromium/third_party/mesa/src/src/gallium/state_trackers/dri/drm/dri_drawable.c b/chromium/third_party/mesa/src/src/gallium/state_trackers/dri/drm/dri_drawable.c
index 0fc19be6ea6..5a261ddb300 120000..100644
--- a/chromium/third_party/mesa/src/src/gallium/state_trackers/dri/drm/dri_drawable.c
+++ b/chromium/third_party/mesa/src/src/gallium/state_trackers/dri/drm/dri_drawable.c
@@ -1 +1,402 @@
-../common/dri_drawable.c
\ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2009, VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Author: Keith Whitwell <keithw@vmware.com>
+ * Author: Jakob Bornecrantz <wallbraker@gmail.com>
+ */
+
+#include "dri_screen.h"
+#include "dri_context.h"
+#include "dri_drawable.h"
+
+#include "pipe/p_screen.h"
+#include "util/u_format.h"
+#include "util/u_memory.h"
+#include "util/u_inlines.h"
+
+static void
+swap_fences_unref(struct dri_drawable *draw);
+
+static boolean
+dri_st_framebuffer_validate(struct st_framebuffer_iface *stfbi,
+ const enum st_attachment_type *statts,
+ unsigned count,
+ struct pipe_resource **out)
+{
+ struct dri_drawable *drawable =
+ (struct dri_drawable *) stfbi->st_manager_private;
+ struct dri_screen *screen = dri_screen(drawable->sPriv);
+ unsigned statt_mask, new_mask;
+ boolean new_stamp;
+ int i;
+ unsigned int lastStamp;
+
+ statt_mask = 0x0;
+ for (i = 0; i < count; i++)
+ statt_mask |= (1 << statts[i]);
+
+ /* record newly allocated textures */
+ new_mask = (statt_mask & ~drawable->texture_mask);
+
+ /*
+ * dPriv->dri2.stamp is the server stamp. dPriv->lastStamp is the
+ * client stamp. It has the value of the server stamp when last
+ * checked.
+ */
+ do {
+ lastStamp = drawable->dPriv->lastStamp;
+ new_stamp = (drawable->texture_stamp != lastStamp);
+
+ if (new_stamp || new_mask || screen->broken_invalidate) {
+ if (new_stamp && drawable->update_drawable_info)
+ drawable->update_drawable_info(drawable);
+
+ drawable->allocate_textures(drawable, statts, count);
+
+ /* add existing textures */
+ for (i = 0; i < ST_ATTACHMENT_COUNT; i++) {
+ if (drawable->textures[i])
+ statt_mask |= (1 << i);
+ }
+
+ drawable->texture_stamp = lastStamp;
+ drawable->texture_mask = statt_mask;
+ }
+ } while (lastStamp != drawable->dPriv->lastStamp);
+
+ if (!out)
+ return TRUE;
+
+ for (i = 0; i < count; i++) {
+ out[i] = NULL;
+ pipe_resource_reference(&out[i], drawable->textures[statts[i]]);
+ }
+
+ return TRUE;
+}
+
+static boolean
+dri_st_framebuffer_flush_front(struct st_framebuffer_iface *stfbi,
+ enum st_attachment_type statt)
+{
+ struct dri_drawable *drawable =
+ (struct dri_drawable *) stfbi->st_manager_private;
+
+ /* XXX remove this and just set the correct one on the framebuffer */
+ drawable->flush_frontbuffer(drawable, statt);
+
+ return TRUE;
+}
+
+/**
+ * This is called when we need to set up GL rendering to a new X window.
+ */
+boolean
+dri_create_buffer(__DRIscreen * sPriv,
+ __DRIdrawable * dPriv,
+ const struct gl_config * visual, boolean isPixmap)
+{
+ struct dri_screen *screen = sPriv->driverPrivate;
+ struct dri_drawable *drawable = NULL;
+
+ if (isPixmap)
+ goto fail; /* not implemented */
+
+ drawable = CALLOC_STRUCT(dri_drawable);
+ if (drawable == NULL)
+ goto fail;
+
+ dri_fill_st_visual(&drawable->stvis, screen, visual);
+
+ /* setup the st_framebuffer_iface */
+ drawable->base.visual = &drawable->stvis;
+ drawable->base.flush_front = dri_st_framebuffer_flush_front;
+ drawable->base.validate = dri_st_framebuffer_validate;
+ drawable->base.st_manager_private = (void *) drawable;
+
+ drawable->screen = screen;
+ drawable->sPriv = sPriv;
+ drawable->dPriv = dPriv;
+ drawable->desired_fences = screen->default_throttle_frames;
+ if (drawable->desired_fences > DRI_SWAP_FENCES_MAX)
+ drawable->desired_fences = DRI_SWAP_FENCES_MAX;
+
+ dPriv->driverPrivate = (void *)drawable;
+ p_atomic_set(&drawable->base.stamp, 1);
+
+ return GL_TRUE;
+fail:
+ FREE(drawable);
+ return GL_FALSE;
+}
+
+void
+dri_destroy_buffer(__DRIdrawable * dPriv)
+{
+ struct dri_drawable *drawable = dri_drawable(dPriv);
+ int i;
+
+ pipe_surface_reference(&drawable->drisw_surface, NULL);
+
+ for (i = 0; i < ST_ATTACHMENT_COUNT; i++)
+ pipe_resource_reference(&drawable->textures[i], NULL);
+
+ swap_fences_unref(drawable);
+
+ FREE(drawable);
+}
+
+/**
+ * Validate the texture at an attachment. Allocate the texture if it does not
+ * exist. Used by the TFP extension.
+ */
+static void
+dri_drawable_validate_att(struct dri_drawable *drawable,
+ enum st_attachment_type statt)
+{
+ enum st_attachment_type statts[ST_ATTACHMENT_COUNT];
+ unsigned i, count = 0;
+
+ /* check if buffer already exists */
+ if (drawable->texture_mask & (1 << statt))
+ return;
+
+ /* make sure DRI2 does not destroy existing buffers */
+ for (i = 0; i < ST_ATTACHMENT_COUNT; i++) {
+ if (drawable->texture_mask & (1 << i)) {
+ statts[count++] = i;
+ }
+ }
+ statts[count++] = statt;
+
+ drawable->texture_stamp = drawable->dPriv->lastStamp - 1;
+
+ drawable->base.validate(&drawable->base, statts, count, NULL);
+}
+
+/**
+ * These are used for GLX_EXT_texture_from_pixmap
+ */
+static void
+dri_set_tex_buffer2(__DRIcontext *pDRICtx, GLint target,
+ GLint format, __DRIdrawable *dPriv)
+{
+ struct dri_context *ctx = dri_context(pDRICtx);
+ struct dri_drawable *drawable = dri_drawable(dPriv);
+ struct pipe_resource *pt;
+
+ dri_drawable_validate_att(drawable, ST_ATTACHMENT_FRONT_LEFT);
+
+ /* Use the pipe resource associated with the X drawable */
+ pt = drawable->textures[ST_ATTACHMENT_FRONT_LEFT];
+
+ if (pt) {
+ enum pipe_format internal_format = pt->format;
+
+ if (format == __DRI_TEXTURE_FORMAT_RGB) {
+ /* only need to cover the formats recognized by dri_fill_st_visual */
+ switch (internal_format) {
+ case PIPE_FORMAT_B8G8R8A8_UNORM:
+ internal_format = PIPE_FORMAT_B8G8R8X8_UNORM;
+ break;
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ internal_format = PIPE_FORMAT_X8R8G8B8_UNORM;
+ break;
+ default:
+ break;
+ }
+ }
+
+ drawable->update_tex_buffer(drawable, ctx, pt);
+
+ ctx->st->teximage(ctx->st,
+ (target == GL_TEXTURE_2D) ? ST_TEXTURE_2D : ST_TEXTURE_RECT,
+ 0, internal_format, pt, FALSE);
+ }
+}
+
+static void
+dri_set_tex_buffer(__DRIcontext *pDRICtx, GLint target,
+ __DRIdrawable *dPriv)
+{
+ dri_set_tex_buffer2(pDRICtx, target, __DRI_TEXTURE_FORMAT_RGBA, dPriv);
+}
+
+const __DRItexBufferExtension driTexBufferExtension = {
+ { __DRI_TEX_BUFFER, __DRI_TEX_BUFFER_VERSION },
+ dri_set_tex_buffer,
+ dri_set_tex_buffer2,
+ NULL,
+};
+
+/**
+ * Get the format and binding of an attachment.
+ */
+void
+dri_drawable_get_format(struct dri_drawable *drawable,
+ enum st_attachment_type statt,
+ enum pipe_format *format,
+ unsigned *bind)
+{
+ switch (statt) {
+ case ST_ATTACHMENT_FRONT_LEFT:
+ case ST_ATTACHMENT_BACK_LEFT:
+ case ST_ATTACHMENT_FRONT_RIGHT:
+ case ST_ATTACHMENT_BACK_RIGHT:
+ *format = drawable->stvis.color_format;
+ *bind = PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
+ break;
+ case ST_ATTACHMENT_DEPTH_STENCIL:
+ *format = drawable->stvis.depth_stencil_format;
+ *bind = PIPE_BIND_DEPTH_STENCIL; /* XXX sampler? */
+ break;
+ default:
+ *format = PIPE_FORMAT_NONE;
+ *bind = 0;
+ break;
+ }
+}
+
+
+/**
+ * swap_fences_pop_front - pull a fence from the throttle queue
+ *
+ * If the throttle queue is filled to the desired number of fences,
+ * pull fences off the queue until the number is less than the desired
+ * number of fences, and return the last fence pulled.
+ */
+static struct pipe_fence_handle *
+swap_fences_pop_front(struct dri_drawable *draw)
+{
+ struct pipe_screen *screen = draw->screen->base.screen;
+ struct pipe_fence_handle *fence = NULL;
+
+ if (draw->desired_fences == 0)
+ return NULL;
+
+ if (draw->cur_fences >= draw->desired_fences) {
+ screen->fence_reference(screen, &fence, draw->swap_fences[draw->tail]);
+ screen->fence_reference(screen, &draw->swap_fences[draw->tail++], NULL);
+ draw->tail &= DRI_SWAP_FENCES_MASK;
+ --draw->cur_fences;
+ }
+ return fence;
+}
+
+
+/**
+ * swap_fences_push_back - push a fence onto the throttle queue
+ *
+ * push a fence onto the throttle queue and pull fences off the queue
+ * so that no more than the desired number of fences remain on the queue.
+ */
+static void
+swap_fences_push_back(struct dri_drawable *draw,
+ struct pipe_fence_handle *fence)
+{
+ struct pipe_screen *screen = draw->screen->base.screen;
+
+ if (!fence || draw->desired_fences == 0)
+ return;
+
+ while(draw->cur_fences == draw->desired_fences)
+ swap_fences_pop_front(draw);
+
+ draw->cur_fences++;
+ screen->fence_reference(screen, &draw->swap_fences[draw->head++],
+ fence);
+ draw->head &= DRI_SWAP_FENCES_MASK;
+}
+
+
+/**
+ * swap_fences_unref - empty the throttle queue
+ *
+ * pulls fences off the throttle queue until it is empty.
+ */
+static void
+swap_fences_unref(struct dri_drawable *draw)
+{
+ struct pipe_screen *screen = draw->screen->base.screen;
+
+ while(draw->cur_fences) {
+ screen->fence_reference(screen, &draw->swap_fences[draw->tail++], NULL);
+ draw->tail &= DRI_SWAP_FENCES_MASK;
+ --draw->cur_fences;
+ }
+}
+
+
+/**
+ * dri_throttle - A DRI2ThrottleExtension throttling function.
+ *
+ * pulls a fence off the throttling queue and waits for it if the
+ * number of fences on the throttling queue has reached the desired
+ * number.
+ *
+ * Then flushes to insert a fence at the current rendering position, and
+ * pushes that fence on the queue. This requires that the st_context_iface
+ * flush method returns a fence even if there are no commands to flush.
+ */
+static void
+dri_throttle(__DRIcontext *driCtx, __DRIdrawable *dPriv,
+ enum __DRI2throttleReason reason)
+{
+ struct dri_drawable *draw = dri_drawable(dPriv);
+ struct st_context_iface *ctxi;
+ struct pipe_screen *screen = draw->screen->base.screen;
+ struct pipe_fence_handle *fence;
+
+ if (reason != __DRI2_THROTTLE_SWAPBUFFER &&
+ reason != __DRI2_THROTTLE_FLUSHFRONT)
+ return;
+
+ fence = swap_fences_pop_front(draw);
+ if (fence) {
+ (void) screen->fence_finish(screen, fence, PIPE_TIMEOUT_INFINITE);
+ screen->fence_reference(screen, &fence, NULL);
+ }
+
+ if (driCtx == NULL)
+ return;
+
+ ctxi = dri_context(driCtx)->st;
+ ctxi->flush(ctxi, 0, &fence);
+ if (fence) {
+ swap_fences_push_back(draw, fence);
+ screen->fence_reference(screen, &fence, NULL);
+ }
+}
+
+
+const __DRI2throttleExtension dri2ThrottleExtension = {
+ .base = { __DRI2_THROTTLE, __DRI2_THROTTLE_VERSION },
+ .throttle = dri_throttle,
+};
+
+
+/* vim: set sw=3 ts=8 sts=3 expandtab: */
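
The swap-fence throttle above is a fixed-size ring buffer: head and tail wrap
with a power-of-two mask, and at most desired_fences entries stay queued, so
the CPU can run only that many frames ahead of the GPU. A stripped-down
sketch with plain ints standing in for pipe_fence_handle pointers (assumed
toy names, not the pipe_screen API):

#include <assert.h>

#define FENCES_MAX  8                  /* power of two, like DRI_SWAP_FENCES_MAX */
#define FENCES_MASK (FENCES_MAX - 1)

struct ring { int slots[FENCES_MAX]; unsigned head, tail, count, desired; };

/* Like swap_fences_pop_front: yields the oldest entry only once the queue
 * has reached the desired depth; -1 plays the role of NULL. */
static int pop_front(struct ring *r)
{
   if (r->desired == 0 || r->count < r->desired)
      return -1;
   int v = r->slots[r->tail++];
   r->tail &= FENCES_MASK;
   r->count--;
   return v;
}

/* Like swap_fences_push_back: drop old entries until there is room. */
static void push_back(struct ring *r, int v)
{
   if (r->desired == 0)
      return;                          /* throttling disabled */
   while (r->count == r->desired)
      (void) pop_front(r);
   r->slots[r->head++] = v;
   r->head &= FENCES_MASK;
   r->count++;
}

int main(void)
{
   struct ring r = { .desired = 2 };
   push_back(&r, 1);
   push_back(&r, 2);
   assert(pop_front(&r) == 1);         /* throttle waits on the oldest fence */
   push_back(&r, 3);
   assert(pop_front(&r) == 2);
   return 0;
}

dri_throttle then just pairs the two operations: pop (and wait on) the oldest
fence, flush to obtain a fence at the current rendering position, and push
that fence back onto the queue.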
diff --git a/chromium/third_party/mesa/src/src/gallium/state_trackers/dri/drm/dri_screen.c b/chromium/third_party/mesa/src/src/gallium/state_trackers/dri/drm/dri_screen.c
index 847f6515f25..102a1326133 120000..100644
--- a/chromium/third_party/mesa/src/src/gallium/state_trackers/dri/drm/dri_screen.c
+++ b/chromium/third_party/mesa/src/src/gallium/state_trackers/dri/drm/dri_screen.c
@@ -1 +1,420 @@
-../common/dri_screen.c
\ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2009, VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Author: Keith Whitwell <keithw@vmware.com>
+ * Author: Jakob Bornecrantz <wallbraker@gmail.com>
+ */
+
+#include "utils.h"
+#include "xmlpool.h"
+
+#include "dri_screen.h"
+
+#include "util/u_inlines.h"
+#include "pipe/p_screen.h"
+#include "pipe/p_format.h"
+#include "state_tracker/st_gl_api.h" /* for st_gl_api_create */
+
+#include "util/u_debug.h"
+
+#define MSAA_VISUAL_MAX_SAMPLES 8
+
+#undef false
+
+PUBLIC const char __driConfigOptions[] =
+ DRI_CONF_BEGIN
+ DRI_CONF_SECTION_PERFORMANCE
+ DRI_CONF_FTHROTTLE_MODE(DRI_CONF_FTHROTTLE_IRQS)
+ DRI_CONF_VBLANK_MODE(DRI_CONF_VBLANK_DEF_INTERVAL_0)
+ DRI_CONF_SECTION_END
+
+ DRI_CONF_SECTION_QUALITY
+/* DRI_CONF_FORCE_S3TC_ENABLE(false) */
+ DRI_CONF_ALLOW_LARGE_TEXTURES(1)
+ DRI_CONF_PP_CELSHADE(0)
+ DRI_CONF_PP_NORED(0)
+ DRI_CONF_PP_NOGREEN(0)
+ DRI_CONF_PP_NOBLUE(0)
+ DRI_CONF_PP_JIMENEZMLAA(0, 0, 32)
+ DRI_CONF_PP_JIMENEZMLAA_COLOR(0, 0, 32)
+ DRI_CONF_SECTION_END
+
+ DRI_CONF_SECTION_DEBUG
+ DRI_CONF_FORCE_GLSL_EXTENSIONS_WARN(false)
+ DRI_CONF_SECTION_END
+
+ DRI_CONF_END;
+
+#define false 0
+
+static const uint __driNConfigOptions = 10;
+
+static const __DRIconfig **
+dri_fill_in_modes(struct dri_screen *screen,
+ unsigned pixel_bits)
+{
+ __DRIconfig **configs = NULL;
+ __DRIconfig **configs_r5g6b5 = NULL;
+ __DRIconfig **configs_a8r8g8b8 = NULL;
+ __DRIconfig **configs_x8r8g8b8 = NULL;
+ uint8_t depth_bits_array[5];
+ uint8_t stencil_bits_array[5];
+ uint8_t msaa_samples_array[MSAA_VISUAL_MAX_SAMPLES];
+ unsigned depth_buffer_factor;
+ unsigned back_buffer_factor;
+ unsigned msaa_samples_factor, msaa_samples_max;
+ unsigned i;
+ struct pipe_screen *p_screen = screen->base.screen;
+ boolean pf_r5g6b5, pf_a8r8g8b8, pf_x8r8g8b8;
+ boolean pf_z16, pf_x8z24, pf_z24x8, pf_s8z24, pf_z24s8, pf_z32;
+
+ static const GLenum back_buffer_modes[] = {
+ GLX_NONE, GLX_SWAP_UNDEFINED_OML, GLX_SWAP_COPY_OML
+ };
+
+ depth_bits_array[0] = 0;
+ stencil_bits_array[0] = 0;
+ depth_buffer_factor = 1;
+
+ msaa_samples_max = (screen->st_api->feature_mask & ST_API_FEATURE_MS_VISUALS)
+ ? MSAA_VISUAL_MAX_SAMPLES : 1;
+
+ pf_x8z24 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_Z24X8_UNORM,
+ PIPE_TEXTURE_2D, 0,
+ PIPE_BIND_DEPTH_STENCIL);
+ pf_z24x8 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_X8Z24_UNORM,
+ PIPE_TEXTURE_2D, 0,
+ PIPE_BIND_DEPTH_STENCIL);
+ pf_s8z24 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_Z24_UNORM_S8_UINT,
+ PIPE_TEXTURE_2D, 0,
+ PIPE_BIND_DEPTH_STENCIL);
+ pf_z24s8 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_S8_UINT_Z24_UNORM,
+ PIPE_TEXTURE_2D, 0,
+ PIPE_BIND_DEPTH_STENCIL);
+ pf_a8r8g8b8 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_B8G8R8A8_UNORM,
+ PIPE_TEXTURE_2D, 0,
+ PIPE_BIND_RENDER_TARGET);
+ pf_x8r8g8b8 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_B8G8R8X8_UNORM,
+ PIPE_TEXTURE_2D, 0,
+ PIPE_BIND_RENDER_TARGET);
+ pf_r5g6b5 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_B5G6R5_UNORM,
+ PIPE_TEXTURE_2D, 0,
+ PIPE_BIND_RENDER_TARGET);
+
+ /* We can only get a 16 or 32 bit depth buffer with getBuffersWithFormat */
+ if (dri_with_format(screen->sPriv)) {
+ pf_z16 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_Z16_UNORM,
+ PIPE_TEXTURE_2D, 0,
+ PIPE_BIND_DEPTH_STENCIL);
+ pf_z32 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_Z32_UNORM,
+ PIPE_TEXTURE_2D, 0,
+ PIPE_BIND_DEPTH_STENCIL);
+ } else {
+ pf_z16 = FALSE;
+ pf_z32 = FALSE;
+ }
+
+ if (pf_z16) {
+ depth_bits_array[depth_buffer_factor] = 16;
+ stencil_bits_array[depth_buffer_factor++] = 0;
+ }
+ if (pf_x8z24 || pf_z24x8) {
+ depth_bits_array[depth_buffer_factor] = 24;
+ stencil_bits_array[depth_buffer_factor++] = 0;
+ screen->d_depth_bits_last = pf_x8z24;
+ }
+ if (pf_s8z24 || pf_z24s8) {
+ depth_bits_array[depth_buffer_factor] = 24;
+ stencil_bits_array[depth_buffer_factor++] = 8;
+ screen->sd_depth_bits_last = pf_s8z24;
+ }
+ if (pf_z32) {
+ depth_bits_array[depth_buffer_factor] = 32;
+ stencil_bits_array[depth_buffer_factor++] = 0;
+ }
+
+ msaa_samples_array[0] = 0;
+ back_buffer_factor = 3;
+
+ /* Also test for color multisample support - just assume it'll work
+ * for all depth buffers.
+ */
+ if (pf_r5g6b5) {
+ msaa_samples_factor = 1;
+ for (i = 2; i <= msaa_samples_max; i++) {
+ if (p_screen->is_format_supported(p_screen, PIPE_FORMAT_B5G6R5_UNORM,
+ PIPE_TEXTURE_2D, i,
+ PIPE_BIND_RENDER_TARGET)) {
+ msaa_samples_array[msaa_samples_factor] = i;
+ msaa_samples_factor++;
+ }
+ }
+
+ configs_r5g6b5 = driCreateConfigs(GL_RGB, GL_UNSIGNED_SHORT_5_6_5,
+ depth_bits_array, stencil_bits_array,
+ depth_buffer_factor, back_buffer_modes,
+ back_buffer_factor,
+ msaa_samples_array, msaa_samples_factor,
+ GL_TRUE);
+ }
+
+ if (pf_a8r8g8b8) {
+ msaa_samples_factor = 1;
+ for (i = 2; i <= msaa_samples_max; i++) {
+ if (p_screen->is_format_supported(p_screen, PIPE_FORMAT_B8G8R8A8_UNORM,
+ PIPE_TEXTURE_2D, i,
+ PIPE_BIND_RENDER_TARGET)) {
+ msaa_samples_array[msaa_samples_factor] = i;
+ msaa_samples_factor++;
+ }
+ }
+
+ configs_a8r8g8b8 = driCreateConfigs(GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV,
+ depth_bits_array,
+ stencil_bits_array,
+ depth_buffer_factor,
+ back_buffer_modes,
+ back_buffer_factor,
+ msaa_samples_array,
+ msaa_samples_factor,
+ GL_TRUE);
+ }
+
+ if (pf_x8r8g8b8) {
+ msaa_samples_factor = 1;
+ for (i = 2; i <= msaa_samples_max; i++) {
+ if (p_screen->is_format_supported(p_screen, PIPE_FORMAT_B8G8R8X8_UNORM,
+ PIPE_TEXTURE_2D, i,
+ PIPE_BIND_RENDER_TARGET)) {
+ msaa_samples_array[msaa_samples_factor] = i;
+ msaa_samples_factor++;
+ }
+ }
+
+ configs_x8r8g8b8 = driCreateConfigs(GL_BGR, GL_UNSIGNED_INT_8_8_8_8_REV,
+ depth_bits_array,
+ stencil_bits_array,
+ depth_buffer_factor,
+ back_buffer_modes,
+ back_buffer_factor,
+ msaa_samples_array,
+ msaa_samples_factor,
+ GL_TRUE);
+ }
+
+ if (pixel_bits == 16) {
+ configs = configs_r5g6b5;
+ configs = driConcatConfigs(configs, configs_a8r8g8b8);
+ configs = driConcatConfigs(configs, configs_x8r8g8b8);
+ } else {
+ configs = configs_a8r8g8b8;
+ configs = driConcatConfigs(configs, configs_x8r8g8b8);
+ configs = driConcatConfigs(configs, configs_r5g6b5);
+ }
+
+ if (configs == NULL) {
+ debug_printf("%s: driCreateConfigs failed\n", __FUNCTION__);
+ return NULL;
+ }
+
+ return (const __DRIconfig **)configs;
+}
+
+/**
+ * Roughly the converse of dri_fill_in_modes.
+ */
+void
+dri_fill_st_visual(struct st_visual *stvis, struct dri_screen *screen,
+ const struct gl_config *mode)
+{
+ memset(stvis, 0, sizeof(*stvis));
+
+ if (!mode)
+ return;
+
+ stvis->samples = mode->samples;
+
+ if (mode->redBits == 8) {
+ if (mode->alphaBits == 8)
+ stvis->color_format = PIPE_FORMAT_B8G8R8A8_UNORM;
+ else
+ stvis->color_format = PIPE_FORMAT_B8G8R8X8_UNORM;
+ } else {
+ stvis->color_format = PIPE_FORMAT_B5G6R5_UNORM;
+ }
+
+ switch (mode->depthBits) {
+ default:
+ case 0:
+ stvis->depth_stencil_format = PIPE_FORMAT_NONE;
+ break;
+ case 16:
+ stvis->depth_stencil_format = PIPE_FORMAT_Z16_UNORM;
+ break;
+ case 24:
+ if (mode->stencilBits == 0) {
+ stvis->depth_stencil_format = (screen->d_depth_bits_last) ?
+ PIPE_FORMAT_Z24X8_UNORM:
+ PIPE_FORMAT_X8Z24_UNORM;
+ } else {
+ stvis->depth_stencil_format = (screen->sd_depth_bits_last) ?
+ PIPE_FORMAT_Z24_UNORM_S8_UINT:
+ PIPE_FORMAT_S8_UINT_Z24_UNORM;
+ }
+ break;
+ case 32:
+ stvis->depth_stencil_format = PIPE_FORMAT_Z32_UNORM;
+ break;
+ }
+
+ stvis->accum_format = (mode->haveAccumBuffer) ?
+ PIPE_FORMAT_R16G16B16A16_SNORM : PIPE_FORMAT_NONE;
+
+ stvis->buffer_mask |= ST_ATTACHMENT_FRONT_LEFT_MASK;
+ stvis->render_buffer = ST_ATTACHMENT_FRONT_LEFT;
+ if (mode->doubleBufferMode) {
+ stvis->buffer_mask |= ST_ATTACHMENT_BACK_LEFT_MASK;
+ stvis->render_buffer = ST_ATTACHMENT_BACK_LEFT;
+ }
+ if (mode->stereoMode) {
+ stvis->buffer_mask |= ST_ATTACHMENT_FRONT_RIGHT_MASK;
+ if (mode->doubleBufferMode)
+ stvis->buffer_mask |= ST_ATTACHMENT_BACK_RIGHT_MASK;
+ }
+
+ if (mode->haveDepthBuffer || mode->haveStencilBuffer)
+ stvis->buffer_mask |= ST_ATTACHMENT_DEPTH_STENCIL_MASK;
+ /* let the state tracker allocate the accum buffer */
+}
+
+static boolean
+dri_get_egl_image(struct st_manager *smapi,
+ void *egl_image,
+ struct st_egl_image *stimg)
+{
+ struct dri_screen *screen = (struct dri_screen *)smapi;
+ __DRIimage *img = NULL;
+
+ if (screen->lookup_egl_image) {
+ img = screen->lookup_egl_image(screen, egl_image);
+ }
+
+ if (!img)
+ return FALSE;
+
+ stimg->texture = NULL;
+ pipe_resource_reference(&stimg->texture, img->texture);
+ stimg->level = img->level;
+ stimg->layer = img->layer;
+
+ return TRUE;
+}
+
+static int
+dri_get_param(struct st_manager *smapi,
+ enum st_manager_param param)
+{
+ struct dri_screen *screen = (struct dri_screen *)smapi;
+
+ switch(param) {
+ case ST_MANAGER_BROKEN_INVALIDATE:
+ return screen->broken_invalidate;
+ default:
+ return 0;
+ }
+}
+
+static void
+dri_destroy_option_cache(struct dri_screen * screen)
+{
+ int i;
+
+ if (screen->optionCache.info) {
+ for (i = 0; i < (1 << screen->optionCache.tableSize); ++i) {
+ FREE(screen->optionCache.info[i].name);
+ FREE(screen->optionCache.info[i].ranges);
+ }
+ FREE(screen->optionCache.info);
+ }
+
+ FREE(screen->optionCache.values);
+}
+
+void
+dri_destroy_screen_helper(struct dri_screen * screen)
+{
+ if (screen->st_api && screen->st_api->destroy)
+ screen->st_api->destroy(screen->st_api);
+
+ if (screen->base.screen)
+ screen->base.screen->destroy(screen->base.screen);
+
+ dri_destroy_option_cache(screen);
+}
+
+void
+dri_destroy_screen(__DRIscreen * sPriv)
+{
+ struct dri_screen *screen = dri_screen(sPriv);
+
+ dri_destroy_screen_helper(screen);
+
+ FREE(screen);
+ sPriv->driverPrivate = NULL;
+ sPriv->extensions = NULL;
+}
+
+const __DRIconfig **
+dri_init_screen_helper(struct dri_screen *screen,
+ struct pipe_screen *pscreen,
+ unsigned pixel_bits)
+{
+ screen->base.screen = pscreen;
+ if (!screen->base.screen) {
+ debug_printf("%s: failed to create pipe_screen\n", __FUNCTION__);
+ return NULL;
+ }
+
+ screen->base.get_egl_image = dri_get_egl_image;
+ screen->base.get_param = dri_get_param;
+
+ screen->st_api = st_gl_api_create();
+ if (!screen->st_api)
+ return NULL;
+
+ if(pscreen->get_param(pscreen, PIPE_CAP_NPOT_TEXTURES))
+ screen->target = PIPE_TEXTURE_2D;
+ else
+ screen->target = PIPE_TEXTURE_RECT;
+
+ driParseOptionInfo(&screen->optionCache,
+ __driConfigOptions, __driNConfigOptions);
+
+ return dri_fill_in_modes(screen, pixel_bits);
+}
+
+/* vim: set sw=3 ts=8 sts=3 expandtab: */
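
The depth/stencil choice in dri_fill_st_visual above reduces to a small
decision table keyed on the visual's depthBits/stencilBits plus the two
ordering flags recorded by dri_fill_in_modes. A condensed, purely
illustrative sketch (PIPE_FORMAT_ prefixes dropped):

#include <stdio.h>

static const char *
pick_zs(int depth_bits, int stencil_bits, int d_last, int sd_last)
{
   switch (depth_bits) {
   case 16:
      return "Z16_UNORM";
   case 24:
      if (stencil_bits == 0)
         return d_last ? "Z24X8_UNORM" : "X8Z24_UNORM";
      return sd_last ? "Z24_UNORM_S8_UINT" : "S8_UINT_Z24_UNORM";
   case 32:
      return "Z32_UNORM";
   default:
      return "NONE";
   }
}

int main(void)
{
   printf("%s\n", pick_zs(24, 8, 0, 1));   /* -> Z24_UNORM_S8_UINT */
   return 0;
}

Here d_last/sd_last stand for screen->d_depth_bits_last and
screen->sd_depth_bits_last, which dri_fill_in_modes sets according to which
component ordering the pipe screen reported as supported.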
diff --git a/chromium/third_party/mesa/src/src/gallium/state_trackers/dri/sw/dri_context.c b/chromium/third_party/mesa/src/src/gallium/state_trackers/dri/sw/dri_context.c
index 5cfbbaeb068..040382698b1 120000..100644
--- a/chromium/third_party/mesa/src/src/gallium/state_trackers/dri/sw/dri_context.c
+++ b/chromium/third_party/mesa/src/src/gallium/state_trackers/dri/sw/dri_context.c
@@ -1 +1,266 @@
-../common/dri_context.c
\ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2009, VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Author: Keith Whitwell <keithw@vmware.com>
+ * Author: Jakob Bornecrantz <wallbraker@gmail.com>
+ */
+
+#include "utils.h"
+
+#include "dri_screen.h"
+#include "dri_drawable.h"
+#include "dri_context.h"
+#include "state_tracker/drm_driver.h"
+
+#include "pipe/p_context.h"
+#include "state_tracker/st_context.h"
+
+static void
+dri_pp_query(struct dri_context *ctx)
+{
+ unsigned int i;
+
+ for (i = 0; i < PP_FILTERS; i++) {
+ ctx->pp_enabled[i] = driQueryOptioni(&ctx->optionCache, pp_filters[i].name);
+ }
+}
+
+static void dri_fill_st_options(struct st_config_options *options,
+ const struct driOptionCache * optionCache)
+{
+ options->force_glsl_extensions_warn =
+ driQueryOptionb(optionCache, "force_glsl_extensions_warn");
+}
+
+GLboolean
+dri_create_context(gl_api api, const struct gl_config * visual,
+ __DRIcontext * cPriv,
+ unsigned major_version,
+ unsigned minor_version,
+ uint32_t flags,
+ unsigned *error,
+ void *sharedContextPrivate)
+{
+ __DRIscreen *sPriv = cPriv->driScreenPriv;
+ struct dri_screen *screen = dri_screen(sPriv);
+ struct st_api *stapi = screen->st_api;
+ struct dri_context *ctx = NULL;
+ struct st_context_iface *st_share = NULL;
+ struct st_context_attribs attribs;
+ enum st_context_error ctx_err = 0;
+
+ memset(&attribs, 0, sizeof(attribs));
+ switch (api) {
+ case API_OPENGLES:
+ attribs.profile = ST_PROFILE_OPENGL_ES1;
+ break;
+ case API_OPENGLES2:
+ attribs.profile = ST_PROFILE_OPENGL_ES2;
+ break;
+ case API_OPENGL:
+ attribs.profile = ST_PROFILE_DEFAULT;
+ attribs.major = major_version;
+ attribs.minor = minor_version;
+
+ if ((flags & __DRI_CTX_FLAG_DEBUG) != 0)
+ attribs.flags |= ST_CONTEXT_FLAG_DEBUG;
+
+ if ((flags & __DRI_CTX_FLAG_FORWARD_COMPATIBLE) != 0)
+ attribs.flags |= ST_CONTEXT_FLAG_FORWARD_COMPATIBLE;
+ break;
+ default:
+ *error = __DRI_CTX_ERROR_BAD_API;
+ goto fail;
+ }
+
+ if (sharedContextPrivate) {
+ st_share = ((struct dri_context *)sharedContextPrivate)->st;
+ }
+
+ ctx = CALLOC_STRUCT(dri_context);
+ if (ctx == NULL) {
+ *error = __DRI_CTX_ERROR_NO_MEMORY;
+ goto fail;
+ }
+
+ cPriv->driverPrivate = ctx;
+ ctx->cPriv = cPriv;
+ ctx->sPriv = sPriv;
+
+ driParseConfigFiles(&ctx->optionCache,
+ &screen->optionCache, sPriv->myNum, driver_descriptor.name);
+
+ dri_fill_st_options(&attribs.options, &ctx->optionCache);
+ dri_fill_st_visual(&attribs.visual, screen, visual);
+ ctx->st = stapi->create_context(stapi, &screen->base, &attribs, &ctx_err,
+ st_share);
+ if (ctx->st == NULL) {
+ switch (ctx_err) {
+ case ST_CONTEXT_SUCCESS:
+ *error = __DRI_CTX_ERROR_SUCCESS;
+ break;
+ case ST_CONTEXT_ERROR_NO_MEMORY:
+ *error = __DRI_CTX_ERROR_NO_MEMORY;
+ break;
+ case ST_CONTEXT_ERROR_BAD_API:
+ *error = __DRI_CTX_ERROR_BAD_API;
+ break;
+ case ST_CONTEXT_ERROR_BAD_VERSION:
+ *error = __DRI_CTX_ERROR_BAD_VERSION;
+ break;
+ case ST_CONTEXT_ERROR_BAD_FLAG:
+ *error = __DRI_CTX_ERROR_BAD_FLAG;
+ break;
+ case ST_CONTEXT_ERROR_UNKNOWN_ATTRIBUTE:
+ *error = __DRI_CTX_ERROR_UNKNOWN_ATTRIBUTE;
+ break;
+ case ST_CONTEXT_ERROR_UNKNOWN_FLAG:
+ *error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
+ break;
+ }
+ goto fail;
+ }
+ ctx->st->st_manager_private = (void *) ctx;
+ ctx->stapi = stapi;
+
+ // Context successfully created. See if post-processing is requested.
+ dri_pp_query(ctx);
+
+ ctx->pp = pp_init(screen->base.screen, ctx->pp_enabled);
+
+ *error = __DRI_CTX_ERROR_SUCCESS;
+ return GL_TRUE;
+
+ fail:
+ if (ctx && ctx->st)
+ ctx->st->destroy(ctx->st);
+
+ FREE(ctx);
+ return GL_FALSE;
+}
+
+void
+dri_destroy_context(__DRIcontext * cPriv)
+{
+ struct dri_context *ctx = dri_context(cPriv);
+
+ /* note: we are freeing values and nothing more because
+ * driParseConfigFiles allocated values only - the rest
+ * is owned by screen optionCache.
+ */
+ FREE(ctx->optionCache.values);
+
+ /* No particular reason to wait for command completion before
+ * destroying a context, but we flush the context here
+ * to avoid having to add code elsewhere to cope with flushing a
+ * partially destroyed context.
+ */
+ ctx->st->flush(ctx->st, 0, NULL);
+ ctx->st->destroy(ctx->st);
+
+ if (ctx->pp) pp_free(ctx->pp);
+
+ FREE(ctx);
+}
+
+GLboolean
+dri_unbind_context(__DRIcontext * cPriv)
+{
+ /* dri_util.c ensures cPriv is not null */
+ struct dri_screen *screen = dri_screen(cPriv->driScreenPriv);
+ struct dri_context *ctx = dri_context(cPriv);
+ struct st_api *stapi = screen->st_api;
+
+ if (--ctx->bind_count == 0) {
+ if (ctx->st == ctx->stapi->get_current(ctx->stapi)) {
+ /* For conformance, unbind is supposed to flush the context.
+ * However, if we do it here we might end up flushing a partially
+ * destroyed context. Instead, we flush in dri_make_current and
+ * in dri_destroy_context which should cover all the cases.
+ */
+ stapi->make_current(stapi, NULL, NULL, NULL);
+ }
+ }
+
+ return GL_TRUE;
+}
+
+GLboolean
+dri_make_current(__DRIcontext * cPriv,
+ __DRIdrawable * driDrawPriv,
+ __DRIdrawable * driReadPriv)
+{
+ /* dri_util.c ensures cPriv is not null */
+ struct dri_context *ctx = dri_context(cPriv);
+ struct dri_drawable *draw = dri_drawable(driDrawPriv);
+ struct dri_drawable *read = dri_drawable(driReadPriv);
+ struct st_context_iface *old_st = ctx->stapi->get_current(ctx->stapi);
+
+ /* Flush the old context here so we don't have to flush on unbind() */
+ if (old_st && old_st != ctx->st)
+ old_st->flush(old_st, ST_FLUSH_FRONT, NULL);
+
+ ++ctx->bind_count;
+
+ if (!driDrawPriv && !driReadPriv)
+ return ctx->stapi->make_current(ctx->stapi, ctx->st, NULL, NULL);
+ else if (!driDrawPriv || !driReadPriv)
+ return GL_FALSE;
+
+ if (ctx->dPriv != driDrawPriv) {
+ ctx->dPriv = driDrawPriv;
+ draw->texture_stamp = driDrawPriv->lastStamp - 1;
+ }
+ if (ctx->rPriv != driReadPriv) {
+ ctx->rPriv = driReadPriv;
+ read->texture_stamp = driReadPriv->lastStamp - 1;
+ }
+
+ ctx->stapi->make_current(ctx->stapi, ctx->st, &draw->base, &read->base);
+
+ // This is ok to call here. If they are already init, it's a no-op.
+ if (draw->textures[ST_ATTACHMENT_BACK_LEFT] && draw->textures[ST_ATTACHMENT_DEPTH_STENCIL]
+ && ctx->pp)
+ pp_init_fbos(ctx->pp, draw->textures[ST_ATTACHMENT_BACK_LEFT]->width0,
+ draw->textures[ST_ATTACHMENT_BACK_LEFT]->height0);
+
+ return GL_TRUE;
+}
+
+struct dri_context *
+dri_get_current(__DRIscreen *sPriv)
+{
+ struct dri_screen *screen = dri_screen(sPriv);
+ struct st_api *stapi = screen->st_api;
+ struct st_context_iface *st;
+
+ st = stapi->get_current(stapi);
+
+ return (struct dri_context *) (st ? st->st_manager_private : NULL);
+}
+
+/* vim: set sw=3 ts=8 sts=3 expandtab: */
diff --git a/chromium/third_party/mesa/src/src/gallium/state_trackers/dri/sw/dri_drawable.c b/chromium/third_party/mesa/src/src/gallium/state_trackers/dri/sw/dri_drawable.c
index 0fc19be6ea6..5a261ddb300 120000..100644
--- a/chromium/third_party/mesa/src/src/gallium/state_trackers/dri/sw/dri_drawable.c
+++ b/chromium/third_party/mesa/src/src/gallium/state_trackers/dri/sw/dri_drawable.c
@@ -1 +1,402 @@
-../common/dri_drawable.c
\ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2009, VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Author: Keith Whitwell <keithw@vmware.com>
+ * Author: Jakob Bornecrantz <wallbraker@gmail.com>
+ */
+
+#include "dri_screen.h"
+#include "dri_context.h"
+#include "dri_drawable.h"
+
+#include "pipe/p_screen.h"
+#include "util/u_format.h"
+#include "util/u_memory.h"
+#include "util/u_inlines.h"
+
+static void
+swap_fences_unref(struct dri_drawable *draw);
+
+static boolean
+dri_st_framebuffer_validate(struct st_framebuffer_iface *stfbi,
+ const enum st_attachment_type *statts,
+ unsigned count,
+ struct pipe_resource **out)
+{
+ struct dri_drawable *drawable =
+ (struct dri_drawable *) stfbi->st_manager_private;
+ struct dri_screen *screen = dri_screen(drawable->sPriv);
+ unsigned statt_mask, new_mask;
+ boolean new_stamp;
+ int i;
+ unsigned int lastStamp;
+
+ statt_mask = 0x0;
+ for (i = 0; i < count; i++)
+ statt_mask |= (1 << statts[i]);
+
+ /* record newly allocated textures */
+ new_mask = (statt_mask & ~drawable->texture_mask);
+
+ /*
+ * dPriv->dri2.stamp is the server stamp. dPriv->lastStamp is the
+ * client stamp. It has the value of the server stamp when last
+ * checked.
+ */
+ do {
+ lastStamp = drawable->dPriv->lastStamp;
+ new_stamp = (drawable->texture_stamp != lastStamp);
+
+ if (new_stamp || new_mask || screen->broken_invalidate) {
+ if (new_stamp && drawable->update_drawable_info)
+ drawable->update_drawable_info(drawable);
+
+ drawable->allocate_textures(drawable, statts, count);
+
+ /* add existing textures */
+ for (i = 0; i < ST_ATTACHMENT_COUNT; i++) {
+ if (drawable->textures[i])
+ statt_mask |= (1 << i);
+ }
+
+ drawable->texture_stamp = lastStamp;
+ drawable->texture_mask = statt_mask;
+ }
+ } while (lastStamp != drawable->dPriv->lastStamp);
+
+ if (!out)
+ return TRUE;
+
+ for (i = 0; i < count; i++) {
+ out[i] = NULL;
+ pipe_resource_reference(&out[i], drawable->textures[statts[i]]);
+ }
+
+ return TRUE;
+}
+
+static boolean
+dri_st_framebuffer_flush_front(struct st_framebuffer_iface *stfbi,
+ enum st_attachment_type statt)
+{
+ struct dri_drawable *drawable =
+ (struct dri_drawable *) stfbi->st_manager_private;
+
+ /* XXX remove this and just set the correct one on the framebuffer */
+ drawable->flush_frontbuffer(drawable, statt);
+
+ return TRUE;
+}
+
+/**
+ * This is called when we need to set up GL rendering to a new X window.
+ */
+boolean
+dri_create_buffer(__DRIscreen * sPriv,
+ __DRIdrawable * dPriv,
+ const struct gl_config * visual, boolean isPixmap)
+{
+ struct dri_screen *screen = sPriv->driverPrivate;
+ struct dri_drawable *drawable = NULL;
+
+ if (isPixmap)
+ goto fail; /* not implemented */
+
+ drawable = CALLOC_STRUCT(dri_drawable);
+ if (drawable == NULL)
+ goto fail;
+
+ dri_fill_st_visual(&drawable->stvis, screen, visual);
+
+ /* setup the st_framebuffer_iface */
+ drawable->base.visual = &drawable->stvis;
+ drawable->base.flush_front = dri_st_framebuffer_flush_front;
+ drawable->base.validate = dri_st_framebuffer_validate;
+ drawable->base.st_manager_private = (void *) drawable;
+
+ drawable->screen = screen;
+ drawable->sPriv = sPriv;
+ drawable->dPriv = dPriv;
+ drawable->desired_fences = screen->default_throttle_frames;
+ if (drawable->desired_fences > DRI_SWAP_FENCES_MAX)
+ drawable->desired_fences = DRI_SWAP_FENCES_MAX;
+
+ dPriv->driverPrivate = (void *)drawable;
+ p_atomic_set(&drawable->base.stamp, 1);
+
+ return GL_TRUE;
+fail:
+ FREE(drawable);
+ return GL_FALSE;
+}
+
+void
+dri_destroy_buffer(__DRIdrawable * dPriv)
+{
+ struct dri_drawable *drawable = dri_drawable(dPriv);
+ int i;
+
+ pipe_surface_reference(&drawable->drisw_surface, NULL);
+
+ for (i = 0; i < ST_ATTACHMENT_COUNT; i++)
+ pipe_resource_reference(&drawable->textures[i], NULL);
+
+ swap_fences_unref(drawable);
+
+ FREE(drawable);
+}
+
+/**
+ * Validate the texture at an attachment. Allocate the texture if it does not
+ * exist. Used by the TFP extension.
+ */
+static void
+dri_drawable_validate_att(struct dri_drawable *drawable,
+ enum st_attachment_type statt)
+{
+ enum st_attachment_type statts[ST_ATTACHMENT_COUNT];
+ unsigned i, count = 0;
+
+ /* check if buffer already exists */
+ if (drawable->texture_mask & (1 << statt))
+ return;
+
+ /* make sure DRI2 does not destroy existing buffers */
+ for (i = 0; i < ST_ATTACHMENT_COUNT; i++) {
+ if (drawable->texture_mask & (1 << i)) {
+ statts[count++] = i;
+ }
+ }
+ statts[count++] = statt;
+
+ drawable->texture_stamp = drawable->dPriv->lastStamp - 1;
+
+ drawable->base.validate(&drawable->base, statts, count, NULL);
+}
+
+/**
+ * These are used for GLX_EXT_texture_from_pixmap
+ */
+static void
+dri_set_tex_buffer2(__DRIcontext *pDRICtx, GLint target,
+ GLint format, __DRIdrawable *dPriv)
+{
+ struct dri_context *ctx = dri_context(pDRICtx);
+ struct dri_drawable *drawable = dri_drawable(dPriv);
+ struct pipe_resource *pt;
+
+ dri_drawable_validate_att(drawable, ST_ATTACHMENT_FRONT_LEFT);
+
+ /* Use the pipe resource associated with the X drawable */
+ pt = drawable->textures[ST_ATTACHMENT_FRONT_LEFT];
+
+ if (pt) {
+ enum pipe_format internal_format = pt->format;
+
+ if (format == __DRI_TEXTURE_FORMAT_RGB) {
+ /* only need to cover the formats recognized by dri_fill_st_visual */
+ switch (internal_format) {
+ case PIPE_FORMAT_B8G8R8A8_UNORM:
+ internal_format = PIPE_FORMAT_B8G8R8X8_UNORM;
+ break;
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ internal_format = PIPE_FORMAT_X8R8G8B8_UNORM;
+ break;
+ default:
+ break;
+ }
+ }
+
+ drawable->update_tex_buffer(drawable, ctx, pt);
+
+ ctx->st->teximage(ctx->st,
+ (target == GL_TEXTURE_2D) ? ST_TEXTURE_2D : ST_TEXTURE_RECT,
+ 0, internal_format, pt, FALSE);
+ }
+}
+
+static void
+dri_set_tex_buffer(__DRIcontext *pDRICtx, GLint target,
+ __DRIdrawable *dPriv)
+{
+ dri_set_tex_buffer2(pDRICtx, target, __DRI_TEXTURE_FORMAT_RGBA, dPriv);
+}
+
+const __DRItexBufferExtension driTexBufferExtension = {
+ { __DRI_TEX_BUFFER, __DRI_TEX_BUFFER_VERSION },
+ dri_set_tex_buffer,
+ dri_set_tex_buffer2,
+ NULL,
+};
+
+/**
+ * Get the format and binding of an attachment.
+ */
+void
+dri_drawable_get_format(struct dri_drawable *drawable,
+ enum st_attachment_type statt,
+ enum pipe_format *format,
+ unsigned *bind)
+{
+ switch (statt) {
+ case ST_ATTACHMENT_FRONT_LEFT:
+ case ST_ATTACHMENT_BACK_LEFT:
+ case ST_ATTACHMENT_FRONT_RIGHT:
+ case ST_ATTACHMENT_BACK_RIGHT:
+ *format = drawable->stvis.color_format;
+ *bind = PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
+ break;
+ case ST_ATTACHMENT_DEPTH_STENCIL:
+ *format = drawable->stvis.depth_stencil_format;
+ *bind = PIPE_BIND_DEPTH_STENCIL; /* XXX sampler? */
+ break;
+ default:
+ *format = PIPE_FORMAT_NONE;
+ *bind = 0;
+ break;
+ }
+}
+
+
+/**
+ * swap_fences_pop_front - pull a fence from the throttle queue
+ *
+ * If the throttle queue is filled to the desired number of fences,
+ * pull fences off the queue until the number is less than the desired
+ * number of fences, and return the last fence pulled.
+ */
+static struct pipe_fence_handle *
+swap_fences_pop_front(struct dri_drawable *draw)
+{
+ struct pipe_screen *screen = draw->screen->base.screen;
+ struct pipe_fence_handle *fence = NULL;
+
+ if (draw->desired_fences == 0)
+ return NULL;
+
+ if (draw->cur_fences >= draw->desired_fences) {
+ screen->fence_reference(screen, &fence, draw->swap_fences[draw->tail]);
+ screen->fence_reference(screen, &draw->swap_fences[draw->tail++], NULL);
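+ /* swap_fences is a power-of-two ring buffer; masking wraps the index. */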
+ draw->tail &= DRI_SWAP_FENCES_MASK;
+ --draw->cur_fences;
+ }
+ return fence;
+}
+
+
+/**
+ * swap_fences_push_back - push a fence onto the throttle queue
+ *
+ * Push a fence onto the throttle queue, and pull fences off the queue
+ * so that no more than the desired number of fences remain on it.
+ */
+static void
+swap_fences_push_back(struct dri_drawable *draw,
+ struct pipe_fence_handle *fence)
+{
+ struct pipe_screen *screen = draw->screen->base.screen;
+
+ if (!fence || draw->desired_fences == 0)
+ return;
+
+ while(draw->cur_fences == draw->desired_fences)
+ swap_fences_pop_front(draw);
+
+ draw->cur_fences++;
+ screen->fence_reference(screen, &draw->swap_fences[draw->head++],
+ fence);
+ draw->head &= DRI_SWAP_FENCES_MASK;
+}
+
+
+/**
+ * swap_fences_unref - empty the throttle queue
+ *
+ * Pulls fences off the throttle queue until it is empty.
+ */
+static void
+swap_fences_unref(struct dri_drawable *draw)
+{
+ struct pipe_screen *screen = draw->screen->base.screen;
+
+ while(draw->cur_fences) {
+ screen->fence_reference(screen, &draw->swap_fences[draw->tail++], NULL);
+ draw->tail &= DRI_SWAP_FENCES_MASK;
+ --draw->cur_fences;
+ }
+}
+
+
+/**
+ * dri_throttle - A DRI2ThrottleExtension throttling function.
+ *
+ * Pulls a fence off the throttling queue and waits for it if the
+ * number of fences on the throttling queue has reached the desired
+ * number.
+ *
+ * Then flushes to insert a fence at the current rendering position, and
+ * pushes that fence on the queue. This requires that the st_context_iface
+ * flush method returns a fence even if there are no commands to flush.
+ */
+static void
+dri_throttle(__DRIcontext *driCtx, __DRIdrawable *dPriv,
+ enum __DRI2throttleReason reason)
+{
+ struct dri_drawable *draw = dri_drawable(dPriv);
+ struct st_context_iface *ctxi;
+ struct pipe_screen *screen = draw->screen->base.screen;
+ struct pipe_fence_handle *fence;
+
+ if (reason != __DRI2_THROTTLE_SWAPBUFFER &&
+ reason != __DRI2_THROTTLE_FLUSHFRONT)
+ return;
+
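+ /* If the queue is full, wait on the oldest outstanding swap fence before queueing more work. */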
+ fence = swap_fences_pop_front(draw);
+ if (fence) {
+ (void) screen->fence_finish(screen, fence, PIPE_TIMEOUT_INFINITE);
+ screen->fence_reference(screen, &fence, NULL);
+ }
+
+ if (driCtx == NULL)
+ return;
+
+ ctxi = dri_context(driCtx)->st;
+ ctxi->flush(ctxi, 0, &fence);
+ if (fence) {
+ swap_fences_push_back(draw, fence);
+ screen->fence_reference(screen, &fence, NULL);
+ }
+}
+
+
+const __DRI2throttleExtension dri2ThrottleExtension = {
+ .base = { __DRI2_THROTTLE, __DRI2_THROTTLE_VERSION },
+ .throttle = dri_throttle,
+};
+
+
+/* vim: set sw=3 ts=8 sts=3 expandtab: */
diff --git a/chromium/third_party/mesa/src/src/gallium/state_trackers/dri/sw/dri_screen.c b/chromium/third_party/mesa/src/src/gallium/state_trackers/dri/sw/dri_screen.c
index 847f6515f25..102a1326133 120000..100644
--- a/chromium/third_party/mesa/src/src/gallium/state_trackers/dri/sw/dri_screen.c
+++ b/chromium/third_party/mesa/src/src/gallium/state_trackers/dri/sw/dri_screen.c
@@ -1 +1,420 @@
-../common/dri_screen.c
\ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2009, VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Author: Keith Whitwell <keithw@vmware.com>
+ * Author: Jakob Bornecrantz <wallbraker@gmail.com>
+ */
+
+#include "utils.h"
+#include "xmlpool.h"
+
+#include "dri_screen.h"
+
+#include "util/u_inlines.h"
+#include "pipe/p_screen.h"
+#include "pipe/p_format.h"
+#include "state_tracker/st_gl_api.h" /* for st_gl_api_create */
+
+#include "util/u_debug.h"
+
+#define MSAA_VISUAL_MAX_SAMPLES 8
+
+#undef false
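+/* The DRI_CONF_* macros below paste their arguments into the XML config
+ * string; presumably the undef keeps a "false" macro from expanding there. */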
+
+PUBLIC const char __driConfigOptions[] =
+ DRI_CONF_BEGIN
+ DRI_CONF_SECTION_PERFORMANCE
+ DRI_CONF_FTHROTTLE_MODE(DRI_CONF_FTHROTTLE_IRQS)
+ DRI_CONF_VBLANK_MODE(DRI_CONF_VBLANK_DEF_INTERVAL_0)
+ DRI_CONF_SECTION_END
+
+ DRI_CONF_SECTION_QUALITY
+/* DRI_CONF_FORCE_S3TC_ENABLE(false) */
+ DRI_CONF_ALLOW_LARGE_TEXTURES(1)
+ DRI_CONF_PP_CELSHADE(0)
+ DRI_CONF_PP_NORED(0)
+ DRI_CONF_PP_NOGREEN(0)
+ DRI_CONF_PP_NOBLUE(0)
+ DRI_CONF_PP_JIMENEZMLAA(0, 0, 32)
+ DRI_CONF_PP_JIMENEZMLAA_COLOR(0, 0, 32)
+ DRI_CONF_SECTION_END
+
+ DRI_CONF_SECTION_DEBUG
+ DRI_CONF_FORCE_GLSL_EXTENSIONS_WARN(false)
+ DRI_CONF_SECTION_END
+
+ DRI_CONF_END;
+
+#define false 0
+
+static const uint __driNConfigOptions = 10;
+
+static const __DRIconfig **
+dri_fill_in_modes(struct dri_screen *screen,
+ unsigned pixel_bits)
+{
+ __DRIconfig **configs = NULL;
+ __DRIconfig **configs_r5g6b5 = NULL;
+ __DRIconfig **configs_a8r8g8b8 = NULL;
+ __DRIconfig **configs_x8r8g8b8 = NULL;
+ uint8_t depth_bits_array[5];
+ uint8_t stencil_bits_array[5];
+ uint8_t msaa_samples_array[MSAA_VISUAL_MAX_SAMPLES];
+ unsigned depth_buffer_factor;
+ unsigned back_buffer_factor;
+ unsigned msaa_samples_factor, msaa_samples_max;
+ unsigned i;
+ struct pipe_screen *p_screen = screen->base.screen;
+ boolean pf_r5g6b5, pf_a8r8g8b8, pf_x8r8g8b8;
+ boolean pf_z16, pf_x8z24, pf_z24x8, pf_s8z24, pf_z24s8, pf_z32;
+
+ static const GLenum back_buffer_modes[] = {
+ GLX_NONE, GLX_SWAP_UNDEFINED_OML, GLX_SWAP_COPY_OML
+ };
+
+ depth_bits_array[0] = 0;
+ stencil_bits_array[0] = 0;
+ depth_buffer_factor = 1;
+
+ msaa_samples_max = (screen->st_api->feature_mask & ST_API_FEATURE_MS_VISUALS)
+ ? MSAA_VISUAL_MAX_SAMPLES : 1;
+
+ pf_x8z24 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_Z24X8_UNORM,
+ PIPE_TEXTURE_2D, 0,
+ PIPE_BIND_DEPTH_STENCIL);
+ pf_z24x8 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_X8Z24_UNORM,
+ PIPE_TEXTURE_2D, 0,
+ PIPE_BIND_DEPTH_STENCIL);
+ pf_s8z24 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_Z24_UNORM_S8_UINT,
+ PIPE_TEXTURE_2D, 0,
+ PIPE_BIND_DEPTH_STENCIL);
+ pf_z24s8 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_S8_UINT_Z24_UNORM,
+ PIPE_TEXTURE_2D, 0,
+ PIPE_BIND_DEPTH_STENCIL);
+ pf_a8r8g8b8 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_B8G8R8A8_UNORM,
+ PIPE_TEXTURE_2D, 0,
+ PIPE_BIND_RENDER_TARGET);
+ pf_x8r8g8b8 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_B8G8R8X8_UNORM,
+ PIPE_TEXTURE_2D, 0,
+ PIPE_BIND_RENDER_TARGET);
+ pf_r5g6b5 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_B5G6R5_UNORM,
+ PIPE_TEXTURE_2D, 0,
+ PIPE_BIND_RENDER_TARGET);
+
+ /* We can only get a 16 or 32 bit depth buffer with getBuffersWithFormat */
+ if (dri_with_format(screen->sPriv)) {
+ pf_z16 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_Z16_UNORM,
+ PIPE_TEXTURE_2D, 0,
+ PIPE_BIND_DEPTH_STENCIL);
+ pf_z32 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_Z32_UNORM,
+ PIPE_TEXTURE_2D, 0,
+ PIPE_BIND_DEPTH_STENCIL);
+ } else {
+ pf_z16 = FALSE;
+ pf_z32 = FALSE;
+ }
+
+ if (pf_z16) {
+ depth_bits_array[depth_buffer_factor] = 16;
+ stencil_bits_array[depth_buffer_factor++] = 0;
+ }
+ if (pf_x8z24 || pf_z24x8) {
+ depth_bits_array[depth_buffer_factor] = 24;
+ stencil_bits_array[depth_buffer_factor++] = 0;
+ screen->d_depth_bits_last = pf_x8z24;
+ }
+ if (pf_s8z24 || pf_z24s8) {
+ depth_bits_array[depth_buffer_factor] = 24;
+ stencil_bits_array[depth_buffer_factor++] = 8;
+ screen->sd_depth_bits_last = pf_s8z24;
+ }
+ if (pf_z32) {
+ depth_bits_array[depth_buffer_factor] = 32;
+ stencil_bits_array[depth_buffer_factor++] = 0;
+ }
+
+ msaa_samples_array[0] = 0;
+ back_buffer_factor = 3;
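+ /* Three entries in back_buffer_modes above: GLX_NONE, SWAP_UNDEFINED and SWAP_COPY. */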
+
+ /* Also test for color multisample support - just assume it'll work
+ * for all depth buffers.
+ */
+ if (pf_r5g6b5) {
+ msaa_samples_factor = 1;
+ for (i = 2; i <= msaa_samples_max; i++) {
+ if (p_screen->is_format_supported(p_screen, PIPE_FORMAT_B5G6R5_UNORM,
+ PIPE_TEXTURE_2D, i,
+ PIPE_BIND_RENDER_TARGET)) {
+ msaa_samples_array[msaa_samples_factor] = i;
+ msaa_samples_factor++;
+ }
+ }
+
+ configs_r5g6b5 = driCreateConfigs(GL_RGB, GL_UNSIGNED_SHORT_5_6_5,
+ depth_bits_array, stencil_bits_array,
+ depth_buffer_factor, back_buffer_modes,
+ back_buffer_factor,
+ msaa_samples_array, msaa_samples_factor,
+ GL_TRUE);
+ }
+
+ if (pf_a8r8g8b8) {
+ msaa_samples_factor = 1;
+ for (i = 2; i <= msaa_samples_max; i++) {
+ if (p_screen->is_format_supported(p_screen, PIPE_FORMAT_B8G8R8A8_UNORM,
+ PIPE_TEXTURE_2D, i,
+ PIPE_BIND_RENDER_TARGET)) {
+ msaa_samples_array[msaa_samples_factor] = i;
+ msaa_samples_factor++;
+ }
+ }
+
+ configs_a8r8g8b8 = driCreateConfigs(GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV,
+ depth_bits_array,
+ stencil_bits_array,
+ depth_buffer_factor,
+ back_buffer_modes,
+ back_buffer_factor,
+ msaa_samples_array,
+ msaa_samples_factor,
+ GL_TRUE);
+ }
+
+ if (pf_x8r8g8b8) {
+ msaa_samples_factor = 1;
+ for (i = 2; i <= msaa_samples_max; i++) {
+ if (p_screen->is_format_supported(p_screen, PIPE_FORMAT_B8G8R8X8_UNORM,
+ PIPE_TEXTURE_2D, i,
+ PIPE_BIND_RENDER_TARGET)) {
+ msaa_samples_array[msaa_samples_factor] = i;
+ msaa_samples_factor++;
+ }
+ }
+
+ configs_x8r8g8b8 = driCreateConfigs(GL_BGR, GL_UNSIGNED_INT_8_8_8_8_REV,
+ depth_bits_array,
+ stencil_bits_array,
+ depth_buffer_factor,
+ back_buffer_modes,
+ back_buffer_factor,
+ msaa_samples_array,
+ msaa_samples_factor,
+ GL_TRUE);
+ }
+
+ if (pixel_bits == 16) {
+ configs = configs_r5g6b5;
+ configs = driConcatConfigs(configs, configs_a8r8g8b8);
+ configs = driConcatConfigs(configs, configs_x8r8g8b8);
+ } else {
+ configs = configs_a8r8g8b8;
+ configs = driConcatConfigs(configs, configs_x8r8g8b8);
+ configs = driConcatConfigs(configs, configs_r5g6b5);
+ }
+
+ if (configs == NULL) {
+ debug_printf("%s: driCreateConfigs failed\n", __FUNCTION__);
+ return NULL;
+ }
+
+ return (const __DRIconfig **)configs;
+}
+
+/**
+ * Roughly the converse of dri_fill_in_modes.
+ */
+void
+dri_fill_st_visual(struct st_visual *stvis, struct dri_screen *screen,
+ const struct gl_config *mode)
+{
+ memset(stvis, 0, sizeof(*stvis));
+
+ if (!mode)
+ return;
+
+ stvis->samples = mode->samples;
+
+ if (mode->redBits == 8) {
+ if (mode->alphaBits == 8)
+ stvis->color_format = PIPE_FORMAT_B8G8R8A8_UNORM;
+ else
+ stvis->color_format = PIPE_FORMAT_B8G8R8X8_UNORM;
+ } else {
+ stvis->color_format = PIPE_FORMAT_B5G6R5_UNORM;
+ }
+
+ switch (mode->depthBits) {
+ default:
+ case 0:
+ stvis->depth_stencil_format = PIPE_FORMAT_NONE;
+ break;
+ case 16:
+ stvis->depth_stencil_format = PIPE_FORMAT_Z16_UNORM;
+ break;
+ case 24:
+ if (mode->stencilBits == 0) {
+ stvis->depth_stencil_format = (screen->d_depth_bits_last) ?
+ PIPE_FORMAT_Z24X8_UNORM:
+ PIPE_FORMAT_X8Z24_UNORM;
+ } else {
+ stvis->depth_stencil_format = (screen->sd_depth_bits_last) ?
+ PIPE_FORMAT_Z24_UNORM_S8_UINT:
+ PIPE_FORMAT_S8_UINT_Z24_UNORM;
+ }
+ break;
+ case 32:
+ stvis->depth_stencil_format = PIPE_FORMAT_Z32_UNORM;
+ break;
+ }
+
+ stvis->accum_format = (mode->haveAccumBuffer) ?
+ PIPE_FORMAT_R16G16B16A16_SNORM : PIPE_FORMAT_NONE;
+
+ stvis->buffer_mask |= ST_ATTACHMENT_FRONT_LEFT_MASK;
+ stvis->render_buffer = ST_ATTACHMENT_FRONT_LEFT;
+ if (mode->doubleBufferMode) {
+ stvis->buffer_mask |= ST_ATTACHMENT_BACK_LEFT_MASK;
+ stvis->render_buffer = ST_ATTACHMENT_BACK_LEFT;
+ }
+ if (mode->stereoMode) {
+ stvis->buffer_mask |= ST_ATTACHMENT_FRONT_RIGHT_MASK;
+ if (mode->doubleBufferMode)
+ stvis->buffer_mask |= ST_ATTACHMENT_BACK_RIGHT_MASK;
+ }
+
+ if (mode->haveDepthBuffer || mode->haveStencilBuffer)
+ stvis->buffer_mask |= ST_ATTACHMENT_DEPTH_STENCIL_MASK;
+ /* let the state tracker allocate the accum buffer */
+}
+
+static boolean
+dri_get_egl_image(struct st_manager *smapi,
+ void *egl_image,
+ struct st_egl_image *stimg)
+{
+ struct dri_screen *screen = (struct dri_screen *)smapi;
+ __DRIimage *img = NULL;
+
+ if (screen->lookup_egl_image) {
+ img = screen->lookup_egl_image(screen, egl_image);
+ }
+
+ if (!img)
+ return FALSE;
+
+ stimg->texture = NULL;
+ pipe_resource_reference(&stimg->texture, img->texture);
+ stimg->level = img->level;
+ stimg->layer = img->layer;
+
+ return TRUE;
+}
+
+static int
+dri_get_param(struct st_manager *smapi,
+ enum st_manager_param param)
+{
+ struct dri_screen *screen = (struct dri_screen *)smapi;
+
+ switch(param) {
+ case ST_MANAGER_BROKEN_INVALIDATE:
+ return screen->broken_invalidate;
+ default:
+ return 0;
+ }
+}
+
+static void
+dri_destroy_option_cache(struct dri_screen * screen)
+{
+ int i;
+
+ if (screen->optionCache.info) {
+ for (i = 0; i < (1 << screen->optionCache.tableSize); ++i) {
+ FREE(screen->optionCache.info[i].name);
+ FREE(screen->optionCache.info[i].ranges);
+ }
+ FREE(screen->optionCache.info);
+ }
+
+ FREE(screen->optionCache.values);
+}
+
+void
+dri_destroy_screen_helper(struct dri_screen * screen)
+{
+ if (screen->st_api && screen->st_api->destroy)
+ screen->st_api->destroy(screen->st_api);
+
+ if (screen->base.screen)
+ screen->base.screen->destroy(screen->base.screen);
+
+ dri_destroy_option_cache(screen);
+}
+
+void
+dri_destroy_screen(__DRIscreen * sPriv)
+{
+ struct dri_screen *screen = dri_screen(sPriv);
+
+ dri_destroy_screen_helper(screen);
+
+ FREE(screen);
+ sPriv->driverPrivate = NULL;
+ sPriv->extensions = NULL;
+}
+
+const __DRIconfig **
+dri_init_screen_helper(struct dri_screen *screen,
+ struct pipe_screen *pscreen,
+ unsigned pixel_bits)
+{
+ screen->base.screen = pscreen;
+ if (!screen->base.screen) {
+ debug_printf("%s: failed to create pipe_screen\n", __FUNCTION__);
+ return NULL;
+ }
+
+ screen->base.get_egl_image = dri_get_egl_image;
+ screen->base.get_param = dri_get_param;
+
+ screen->st_api = st_gl_api_create();
+ if (!screen->st_api)
+ return NULL;
+
+ if(pscreen->get_param(pscreen, PIPE_CAP_NPOT_TEXTURES))
+ screen->target = PIPE_TEXTURE_2D;
+ else
+ screen->target = PIPE_TEXTURE_RECT;
+
+ driParseOptionInfo(&screen->optionCache,
+ __driConfigOptions, __driNConfigOptions);
+
+ return dri_fill_in_modes(screen, pixel_bits);
+}
+
+/* vim: set sw=3 ts=8 sts=3 expandtab: */
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_batchbuffer.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_batchbuffer.c
index d38cdf31cc6..06cbaecc74d 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_batchbuffer.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_batchbuffer.c
@@ -1 +1,543 @@
-../intel/intel_batchbuffer.c
\ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "intel_context.h"
+#include "intel_batchbuffer.h"
+#include "intel_buffer_objects.h"
+#include "intel_reg.h"
+#include "intel_bufmgr.h"
+#include "intel_buffers.h"
+
+struct cached_batch_item {
+ struct cached_batch_item *next;
+ uint16_t header;
+ uint16_t size;
+};
+
+static void clear_cache( struct intel_context *intel )
+{
+ struct cached_batch_item *item = intel->batch.cached_items;
+
+ while (item) {
+ struct cached_batch_item *next = item->next;
+ free(item);
+ item = next;
+ }
+
+ intel->batch.cached_items = NULL;
+}
+
+void
+intel_batchbuffer_init(struct intel_context *intel)
+{
+ intel_batchbuffer_reset(intel);
+
+ if (intel->gen >= 6) {
+ /* We can't just use brw_state_batch to get a chunk of space for
+ * the gen6 workaround because it involves actually writing to
+ * the buffer, and the kernel doesn't let us write to the batch.
+ */
+ intel->batch.workaround_bo = drm_intel_bo_alloc(intel->bufmgr,
+ "pipe_control workaround",
+ 4096, 4096);
+ }
+}
+
+void
+intel_batchbuffer_reset(struct intel_context *intel)
+{
+ if (intel->batch.last_bo != NULL) {
+ drm_intel_bo_unreference(intel->batch.last_bo);
+ intel->batch.last_bo = NULL;
+ }
+ intel->batch.last_bo = intel->batch.bo;
+
+ clear_cache(intel);
+
+ intel->batch.bo = drm_intel_bo_alloc(intel->bufmgr, "batchbuffer",
+ intel->maxBatchSize, 4096);
+
+ intel->batch.reserved_space = BATCH_RESERVED;
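+ /* Indirect state is packed downward from the end of the batch; bo->size means none has been allocated yet. */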
+ intel->batch.state_batch_offset = intel->batch.bo->size;
+ intel->batch.used = 0;
+ intel->batch.needs_sol_reset = false;
+}
+
+void
+intel_batchbuffer_save_state(struct intel_context *intel)
+{
+ intel->batch.saved.used = intel->batch.used;
+ intel->batch.saved.reloc_count =
+ drm_intel_gem_bo_get_reloc_count(intel->batch.bo);
+}
+
+void
+intel_batchbuffer_reset_to_saved(struct intel_context *intel)
+{
+ drm_intel_gem_bo_clear_relocs(intel->batch.bo, intel->batch.saved.reloc_count);
+
+ intel->batch.used = intel->batch.saved.used;
+
+ /* Cached batch state is dead, since we just cleared some unknown part of the
+ * batchbuffer. Assume that the caller resets any other state necessary.
+ */
+ clear_cache(intel);
+}
+
+void
+intel_batchbuffer_free(struct intel_context *intel)
+{
+ drm_intel_bo_unreference(intel->batch.last_bo);
+ drm_intel_bo_unreference(intel->batch.bo);
+ drm_intel_bo_unreference(intel->batch.workaround_bo);
+ clear_cache(intel);
+}
+
+static void
+do_batch_dump(struct intel_context *intel)
+{
+ struct drm_intel_decode *decode;
+ struct intel_batchbuffer *batch = &intel->batch;
+ int ret;
+
+ decode = drm_intel_decode_context_alloc(intel->intelScreen->deviceID);
+ if (!decode)
+ return;
+
+ ret = drm_intel_bo_map(batch->bo, false);
+ if (ret == 0) {
+ drm_intel_decode_set_batch_pointer(decode,
+ batch->bo->virtual,
+ batch->bo->offset,
+ batch->used);
+ } else {
+ fprintf(stderr,
+ "WARNING: failed to map batchbuffer (%s), "
+ "dumping uploaded data instead.\n", strerror(ret));
+
+ drm_intel_decode_set_batch_pointer(decode,
+ batch->map,
+ batch->bo->offset,
+ batch->used);
+ }
+
+ drm_intel_decode(decode);
+
+ drm_intel_decode_context_free(decode);
+
+ if (ret == 0) {
+ drm_intel_bo_unmap(batch->bo);
+
+ if (intel->vtbl.debug_batch != NULL)
+ intel->vtbl.debug_batch(intel);
+ }
+}
+
+/* TODO: Push this whole function into bufmgr.
+ */
+static int
+do_flush_locked(struct intel_context *intel)
+{
+ struct intel_batchbuffer *batch = &intel->batch;
+ int ret = 0;
+
+ ret = drm_intel_bo_subdata(batch->bo, 0, 4*batch->used, batch->map);
+ if (ret == 0 && batch->state_batch_offset != batch->bo->size) {
+ ret = drm_intel_bo_subdata(batch->bo,
+ batch->state_batch_offset,
+ batch->bo->size - batch->state_batch_offset,
+ (char *)batch->map + batch->state_batch_offset);
+ }
+
+ if (!intel->intelScreen->no_hw) {
+ int flags;
+
+ if (intel->gen < 6 || !batch->is_blit) {
+ flags = I915_EXEC_RENDER;
+ } else {
+ flags = I915_EXEC_BLT;
+ }
+
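+ /* SOL reset asks the kernel to zero the gen7 transform feedback (SO) write offsets before running this batch. */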
+ if (batch->needs_sol_reset)
+ flags |= I915_EXEC_GEN7_SOL_RESET;
+
+ if (ret == 0) {
+ if (unlikely(INTEL_DEBUG & DEBUG_AUB) && intel->vtbl.annotate_aub)
+ intel->vtbl.annotate_aub(intel);
+ if (intel->hw_ctx == NULL || batch->is_blit) {
+ ret = drm_intel_bo_mrb_exec(batch->bo, 4 * batch->used, NULL, 0, 0,
+ flags);
+ } else {
+ ret = drm_intel_gem_bo_context_exec(batch->bo, intel->hw_ctx,
+ 4 * batch->used, flags);
+ }
+ }
+ }
+
+ if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
+ do_batch_dump(intel);
+
+ if (ret != 0) {
+ fprintf(stderr, "intel_do_flush_locked failed: %s\n", strerror(-ret));
+ exit(1);
+ }
+ intel->vtbl.new_batch(intel);
+
+ return ret;
+}
+
+int
+_intel_batchbuffer_flush(struct intel_context *intel,
+ const char *file, int line)
+{
+ int ret;
+
+ if (intel->batch.used == 0)
+ return 0;
+
+ if (intel->first_post_swapbuffers_batch == NULL) {
+ intel->first_post_swapbuffers_batch = intel->batch.bo;
+ drm_intel_bo_reference(intel->first_post_swapbuffers_batch);
+ }
+
+ if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
+ fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
+ 4*intel->batch.used);
+
+ intel->batch.reserved_space = 0;
+
+ if (intel->vtbl.finish_batch)
+ intel->vtbl.finish_batch(intel);
+
+ /* Mark the end of the buffer. */
+ intel_batchbuffer_emit_dword(intel, MI_BATCH_BUFFER_END);
+ if (intel->batch.used & 1) {
+ /* Round batchbuffer usage to 2 DWORDs. */
+ intel_batchbuffer_emit_dword(intel, MI_NOOP);
+ }
+
+ intel_upload_finish(intel);
+
+ /* Check that we didn't just wrap our batchbuffer at a bad time. */
+ assert(!intel->no_batch_wrap);
+
+ ret = do_flush_locked(intel);
+
+ if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
+ fprintf(stderr, "waiting for idle\n");
+ drm_intel_bo_wait_rendering(intel->batch.bo);
+ }
+
+ /* Reset the buffer:
+ */
+ intel_batchbuffer_reset(intel);
+
+ return ret;
+}
+
+
+/* This is the only way buffers get added to the validate list.
+ */
+bool
+intel_batchbuffer_emit_reloc(struct intel_context *intel,
+ drm_intel_bo *buffer,
+ uint32_t read_domains, uint32_t write_domain,
+ uint32_t delta)
+{
+ int ret;
+
+ ret = drm_intel_bo_emit_reloc(intel->batch.bo, 4*intel->batch.used,
+ buffer, delta,
+ read_domains, write_domain);
+ assert(ret == 0);
+ (void)ret;
+
+ /*
+ * Using the old buffer offset, write in what the right data would be, so
+ * that if the buffer doesn't move, the kernel can short-circuit the
+ * relocation processing.
+ */
+ intel_batchbuffer_emit_dword(intel, buffer->offset + delta);
+
+ return true;
+}
+
+bool
+intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
+ drm_intel_bo *buffer,
+ uint32_t read_domains,
+ uint32_t write_domain,
+ uint32_t delta)
+{
+ int ret;
+
+ ret = drm_intel_bo_emit_reloc_fence(intel->batch.bo, 4*intel->batch.used,
+ buffer, delta,
+ read_domains, write_domain);
+ assert(ret == 0);
+ (void)ret;
+
+ /*
+ * Using the old buffer offset, write in what the right data would
+ * be, so that if the buffer doesn't move, the kernel can
+ * short-circuit the relocation processing.
+ */
+ intel_batchbuffer_emit_dword(intel, buffer->offset + delta);
+
+ return true;
+}
+
+void
+intel_batchbuffer_data(struct intel_context *intel,
+ const void *data, GLuint bytes, bool is_blit)
+{
+ assert((bytes & 3) == 0);
+ intel_batchbuffer_require_space(intel, bytes, is_blit);
+ __memcpy(intel->batch.map + intel->batch.used, data, bytes);
+ intel->batch.used += bytes >> 2;
+}
+
+void
+intel_batchbuffer_cached_advance(struct intel_context *intel)
+{
+ struct cached_batch_item **prev = &intel->batch.cached_items, *item;
+ uint32_t sz = (intel->batch.used - intel->batch.emit) * sizeof(uint32_t);
+ uint32_t *start = intel->batch.map + intel->batch.emit;
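+ /* The command opcode lives in the high 16 bits of a packet's first dword. */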
+ uint16_t op = *start >> 16;
+
+ while (*prev) {
+ uint32_t *old;
+
+ item = *prev;
+ old = intel->batch.map + item->header;
+ if (op == *old >> 16) {
+ if (item->size == sz && memcmp(old, start, sz) == 0) {
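+ /* An identical packet is already in the batch: move its cache entry to the front (MRU) and rewind 'used' to drop the duplicate. */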
+ if (prev != &intel->batch.cached_items) {
+ *prev = item->next;
+ item->next = intel->batch.cached_items;
+ intel->batch.cached_items = item;
+ }
+ intel->batch.used = intel->batch.emit;
+ return;
+ }
+
+ goto emit;
+ }
+ prev = &item->next;
+ }
+
+ item = malloc(sizeof(struct cached_batch_item));
+ if (item == NULL)
+ return;
+
+ item->next = intel->batch.cached_items;
+ intel->batch.cached_items = item;
+
+emit:
+ item->size = sz;
+ item->header = intel->batch.emit;
+}
+
+/**
+ * Restriction [DevSNB, DevIVB]:
+ *
+ * Prior to changing Depth/Stencil Buffer state (i.e. any combination of
+ * 3DSTATE_DEPTH_BUFFER, 3DSTATE_CLEAR_PARAMS, 3DSTATE_STENCIL_BUFFER,
+ * 3DSTATE_HIER_DEPTH_BUFFER) SW must first issue a pipelined depth stall
+ * (PIPE_CONTROL with Depth Stall bit set), followed by a pipelined depth
+ * cache flush (PIPE_CONTROL with Depth Flush Bit set), followed by
+ * another pipelined depth stall (PIPE_CONTROL with Depth Stall bit set),
+ * unless SW can otherwise guarantee that the pipeline from WM onwards is
+ * already flushed (e.g., via a preceding MI_FLUSH).
+ */
+void
+intel_emit_depth_stall_flushes(struct intel_context *intel)
+{
+ assert(intel->gen >= 6 && intel->gen <= 7);
+
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
+ OUT_BATCH(PIPE_CONTROL_DEPTH_STALL);
+ OUT_BATCH(0); /* address */
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
+ OUT_BATCH(PIPE_CONTROL_DEPTH_CACHE_FLUSH);
+ OUT_BATCH(0); /* address */
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
+ OUT_BATCH(PIPE_CONTROL_DEPTH_STALL);
+ OUT_BATCH(0); /* address */
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+}
+
+/**
+ * From the BSpec, volume 2a.03: VS Stage Input / State:
+ * "[DevIVB] A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
+ * stall needs to be sent just prior to any 3DSTATE_VS, 3DSTATE_URB_VS,
+ * 3DSTATE_CONSTANT_VS, 3DSTATE_BINDING_TABLE_POINTER_VS,
+ * 3DSTATE_SAMPLER_STATE_POINTER_VS command. Only one PIPE_CONTROL needs
+ * to be sent before any combination of VS associated 3DSTATE."
+ */
+void
+gen7_emit_vs_workaround_flush(struct intel_context *intel)
+{
+ assert(intel->gen == 7);
+
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
+ OUT_BATCH(PIPE_CONTROL_DEPTH_STALL | PIPE_CONTROL_WRITE_IMMEDIATE);
+ OUT_RELOC(intel->batch.workaround_bo,
+ I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+}
+
+/**
+ * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
+ * implementing two workarounds on gen6. From section 1.4.7.1
+ * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
+ *
+ * [DevSNB-C+{W/A}] Before any depth stall flush (including those
+ * produced by non-pipelined state commands), software needs to first
+ * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
+ * 0.
+ *
+ * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
+ * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
+ *
+ * And the workaround for these two requires this workaround first:
+ *
+ * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
+ * BEFORE the pipe-control with a post-sync op and no write-cache
+ * flushes.
+ *
+ * And this last workaround is tricky because of the requirements on
+ * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
+ * volume 2 part 1:
+ *
+ * "1 of the following must also be set:
+ * - Render Target Cache Flush Enable ([12] of DW1)
+ * - Depth Cache Flush Enable ([0] of DW1)
+ * - Stall at Pixel Scoreboard ([1] of DW1)
+ * - Depth Stall ([13] of DW1)
+ * - Post-Sync Operation ([13] of DW1)
+ * - Notify Enable ([8] of DW1)"
+ *
+ * The cache flushes require the workaround flush that triggered this
+ * one, so we can't use it. Depth stall would trigger the same.
+ * Post-sync nonzero is what triggered this second workaround, so we
+ * can't use that one either. Notify enable is IRQs, which aren't
+ * really our business. That leaves only stall at scoreboard.
+ */
+void
+intel_emit_post_sync_nonzero_flush(struct intel_context *intel)
+{
+ if (!intel->batch.need_workaround_flush)
+ return;
+
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
+ OUT_BATCH(PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD);
+ OUT_BATCH(0); /* address */
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
+ OUT_BATCH(PIPE_CONTROL_WRITE_IMMEDIATE);
+ OUT_RELOC(intel->batch.workaround_bo,
+ I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+
+ intel->batch.need_workaround_flush = false;
+}
+
+/* Emit a pipelined flush to either flush render and texture cache for
+ * reading from a FBO-drawn texture, or flush so that frontbuffer
+ * render appears on the screen in DRI1.
+ *
+ * This is also used for the always_flush_cache driconf debug option.
+ */
+void
+intel_batchbuffer_emit_mi_flush(struct intel_context *intel)
+{
+ if (intel->gen >= 6) {
+ if (intel->batch.is_blit) {
+ BEGIN_BATCH_BLT(4);
+ OUT_BATCH(MI_FLUSH_DW);
+ OUT_BATCH(0);
+ OUT_BATCH(0);
+ OUT_BATCH(0);
+ ADVANCE_BATCH();
+ } else {
+ if (intel->gen == 6) {
+ /* Hardware workaround: SNB B-Spec says:
+ *
+ * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache
+ * Flush Enable =1, a PIPE_CONTROL with any non-zero
+ * post-sync-op is required.
+ */
+ intel_emit_post_sync_nonzero_flush(intel);
+ }
+
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
+ OUT_BATCH(PIPE_CONTROL_INSTRUCTION_FLUSH |
+ PIPE_CONTROL_WRITE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_VF_CACHE_INVALIDATE |
+ PIPE_CONTROL_TC_FLUSH |
+ PIPE_CONTROL_NO_WRITE |
+ PIPE_CONTROL_CS_STALL);
+ OUT_BATCH(0); /* write address */
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+ }
+ } else if (intel->gen >= 4) {
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2) |
+ PIPE_CONTROL_WRITE_FLUSH |
+ PIPE_CONTROL_NO_WRITE);
+ OUT_BATCH(0); /* write address */
+ OUT_BATCH(0); /* write data */
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+ } else {
+ BEGIN_BATCH(1);
+ OUT_BATCH(MI_FLUSH);
+ ADVANCE_BATCH();
+ }
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_blit.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_blit.c
index dd6c8d17c28..36a2c6aadac 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_blit.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_blit.c
@@ -1 +1,604 @@
-../intel/intel_blit.c
\ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "main/mtypes.h"
+#include "main/context.h"
+#include "main/enums.h"
+#include "main/colormac.h"
+#include "main/fbobject.h"
+
+#include "intel_blit.h"
+#include "intel_buffers.h"
+#include "intel_context.h"
+#include "intel_fbo.h"
+#include "intel_reg.h"
+#include "intel_regions.h"
+#include "intel_batchbuffer.h"
+#include "intel_mipmap_tree.h"
+
+#define FILE_DEBUG_FLAG DEBUG_BLIT
+
+static GLuint translate_raster_op(GLenum logicop)
+{
+ switch(logicop) {
+ case GL_CLEAR: return 0x00;
+ case GL_AND: return 0x88;
+ case GL_AND_REVERSE: return 0x44;
+ case GL_COPY: return 0xCC;
+ case GL_AND_INVERTED: return 0x22;
+ case GL_NOOP: return 0xAA;
+ case GL_XOR: return 0x66;
+ case GL_OR: return 0xEE;
+ case GL_NOR: return 0x11;
+ case GL_EQUIV: return 0x99;
+ case GL_INVERT: return 0x55;
+ case GL_OR_REVERSE: return 0xDD;
+ case GL_COPY_INVERTED: return 0x33;
+ case GL_OR_INVERTED: return 0xBB;
+ case GL_NAND: return 0x77;
+ case GL_SET: return 0xFF;
+ default: return 0;
+ }
+}
+
+static uint32_t
+br13_for_cpp(int cpp)
+{
+ switch (cpp) {
+ case 4:
+ return BR13_8888;
+ case 2:
+ return BR13_565;
+ case 1:
+ return BR13_8;
+ default:
+ assert(0);
+ return 0;
+ }
+}
+
+/* Copy BitBlt
+ */
+bool
+intelEmitCopyBlit(struct intel_context *intel,
+ GLuint cpp,
+ GLshort src_pitch,
+ drm_intel_bo *src_buffer,
+ GLuint src_offset,
+ uint32_t src_tiling,
+ GLshort dst_pitch,
+ drm_intel_bo *dst_buffer,
+ GLuint dst_offset,
+ uint32_t dst_tiling,
+ GLshort src_x, GLshort src_y,
+ GLshort dst_x, GLshort dst_y,
+ GLshort w, GLshort h,
+ GLenum logic_op)
+{
+ GLuint CMD, BR13, pass = 0;
+ int dst_y2 = dst_y + h;
+ int dst_x2 = dst_x + w;
+ drm_intel_bo *aper_array[3];
+ BATCH_LOCALS;
+
+ if (dst_tiling != I915_TILING_NONE) {
+ if (dst_offset & 4095)
+ return false;
+ if (dst_tiling == I915_TILING_Y)
+ return false;
+ }
+ if (src_tiling != I915_TILING_NONE) {
+ if (src_offset & 4095)
+ return false;
+ if (src_tiling == I915_TILING_Y)
+ return false;
+ }
+
+ /* do space check before going any further */
+ do {
+ aper_array[0] = intel->batch.bo;
+ aper_array[1] = dst_buffer;
+ aper_array[2] = src_buffer;
+
+ if (dri_bufmgr_check_aperture_space(aper_array, 3) != 0) {
+ intel_batchbuffer_flush(intel);
+ pass++;
+ } else
+ break;
+ } while (pass < 2);
+
+ if (pass >= 2)
+ return false;
+
+ intel_batchbuffer_require_space(intel, 8 * 4, true);
+ DBG("%s src:buf(%p)/%d+%d %d,%d dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
+ __FUNCTION__,
+ src_buffer, src_pitch, src_offset, src_x, src_y,
+ dst_buffer, dst_pitch, dst_offset, dst_x, dst_y, w, h);
+
+ src_pitch *= cpp;
+ dst_pitch *= cpp;
+
+ /* Blit pitch must be dword-aligned. Otherwise, the hardware appears to drop
+ * the low bits.
+ */
+ assert(src_pitch % 4 == 0);
+ assert(dst_pitch % 4 == 0);
+
+ /* For big formats (such as floating point), do the copy using 32bpp and
+ * multiply the coordinates.
+ */
+ if (cpp > 4) {
+ assert(cpp % 4 == 0);
+ dst_x *= cpp / 4;
+ dst_x2 *= cpp / 4;
+ src_x *= cpp / 4;
+ cpp = 4;
+ }
+
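+ /* BR13 holds the color depth, the raster op (bits 16..23) and, later, the destination pitch in its low 16 bits. */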
+ BR13 = br13_for_cpp(cpp) | translate_raster_op(logic_op) << 16;
+
+ switch (cpp) {
+ case 1:
+ case 2:
+ CMD = XY_SRC_COPY_BLT_CMD;
+ break;
+ case 4:
+ CMD = XY_SRC_COPY_BLT_CMD | XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
+ break;
+ default:
+ return false;
+ }
+
+#ifndef I915
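+ /* On 965-class hardware the blitter pitch for tiled surfaces is given in dwords, hence the divide by 4. */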
+ if (dst_tiling != I915_TILING_NONE) {
+ CMD |= XY_DST_TILED;
+ dst_pitch /= 4;
+ }
+ if (src_tiling != I915_TILING_NONE) {
+ CMD |= XY_SRC_TILED;
+ src_pitch /= 4;
+ }
+#endif
+
+ if (dst_y2 <= dst_y || dst_x2 <= dst_x) {
+ return true;
+ }
+
+ assert(dst_x < dst_x2);
+ assert(dst_y < dst_y2);
+
+ BEGIN_BATCH_BLT(8);
+ OUT_BATCH(CMD);
+ OUT_BATCH(BR13 | (uint16_t)dst_pitch);
+ OUT_BATCH((dst_y << 16) | dst_x);
+ OUT_BATCH((dst_y2 << 16) | dst_x2);
+ OUT_RELOC_FENCED(dst_buffer,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
+ dst_offset);
+ OUT_BATCH((src_y << 16) | src_x);
+ OUT_BATCH((uint16_t)src_pitch);
+ OUT_RELOC_FENCED(src_buffer,
+ I915_GEM_DOMAIN_RENDER, 0,
+ src_offset);
+ ADVANCE_BATCH();
+
+ intel_batchbuffer_emit_mi_flush(intel);
+
+ return true;
+}
+
+
+/**
+ * Use blitting to clear the renderbuffers named by 'mask'.
+ * Note: we can't use the ctx->DrawBuffer->_ColorDrawBufferIndexes field
+ * since that might include software renderbuffers or renderbuffers
+ * which we're clearing with triangles.
+ * \param mask bitmask of BUFFER_BIT_* values indicating buffers to clear
+ */
+GLbitfield
+intelClearWithBlit(struct gl_context *ctx, GLbitfield mask)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
+ GLuint clear_depth_value, clear_depth_mask;
+ GLint cx, cy, cw, ch;
+ GLbitfield fail_mask = 0;
+ BATCH_LOCALS;
+
+ /*
+ * Compute values for clearing the buffers.
+ */
+ clear_depth_value = 0;
+ clear_depth_mask = 0;
+ if (mask & BUFFER_BIT_DEPTH) {
+ clear_depth_value = (GLuint) (fb->_DepthMax * ctx->Depth.Clear);
+ clear_depth_mask = XY_BLT_WRITE_RGB;
+ }
+ if (mask & BUFFER_BIT_STENCIL) {
+ clear_depth_value |= (ctx->Stencil.Clear & 0xff) << 24;
+ clear_depth_mask |= XY_BLT_WRITE_ALPHA;
+ }
+
+ cx = fb->_Xmin;
+ if (_mesa_is_winsys_fbo(fb))
+ cy = ctx->DrawBuffer->Height - fb->_Ymax;
+ else
+ cy = fb->_Ymin;
+ cw = fb->_Xmax - fb->_Xmin;
+ ch = fb->_Ymax - fb->_Ymin;
+
+ if (cw == 0 || ch == 0)
+ return 0;
+
+ /* Loop over all renderbuffers */
+ mask &= (1 << BUFFER_COUNT) - 1;
+ while (mask) {
+ GLuint buf = ffs(mask) - 1;
+ bool is_depth_stencil = buf == BUFFER_DEPTH || buf == BUFFER_STENCIL;
+ struct intel_renderbuffer *irb;
+ int x1, y1, x2, y2;
+ uint32_t clear_val;
+ uint32_t BR13, CMD;
+ struct intel_region *region;
+ int pitch, cpp;
+ drm_intel_bo *aper_array[2];
+
+ mask &= ~(1 << buf);
+
+ irb = intel_get_renderbuffer(fb, buf);
+ if (irb && irb->mt) {
+ region = irb->mt->region;
+ assert(region);
+ assert(region->bo);
+ } else {
+ fail_mask |= 1 << buf;
+ continue;
+ }
+
+ /* OK, clear this renderbuffer */
+ x1 = cx + irb->draw_x;
+ y1 = cy + irb->draw_y;
+ x2 = cx + cw + irb->draw_x;
+ y2 = cy + ch + irb->draw_y;
+
+ pitch = region->pitch;
+ cpp = region->cpp;
+
+ DBG("%s dst:buf(%p)/%d %d,%d sz:%dx%d\n",
+ __FUNCTION__,
+ region->bo, (pitch * cpp),
+ x1, y1, x2 - x1, y2 - y1);
+
+ BR13 = 0xf0 << 16;
+ CMD = XY_COLOR_BLT_CMD;
+
+ /* Setup the blit command */
+ if (cpp == 4) {
+ if (is_depth_stencil) {
+ CMD |= clear_depth_mask;
+ } else {
+ /* clearing RGBA */
+ CMD |= XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
+ }
+ }
+
+ assert(region->tiling != I915_TILING_Y);
+
+#ifndef I915
+ if (region->tiling != I915_TILING_NONE) {
+ CMD |= XY_DST_TILED;
+ pitch /= 4;
+ }
+#endif
+ BR13 |= (pitch * cpp);
+
+ if (is_depth_stencil) {
+ clear_val = clear_depth_value;
+ } else {
+ uint8_t clear[4];
+ GLfloat *color = ctx->Color.ClearColor.f;
+
+ _mesa_unclamped_float_rgba_to_ubyte(clear, color);
+
+ switch (intel_rb_format(irb)) {
+ case MESA_FORMAT_ARGB8888:
+ case MESA_FORMAT_XRGB8888:
+ clear_val = PACK_COLOR_8888(clear[3], clear[0],
+ clear[1], clear[2]);
+ break;
+ case MESA_FORMAT_RGB565:
+ clear_val = PACK_COLOR_565(clear[0], clear[1], clear[2]);
+ break;
+ case MESA_FORMAT_ARGB4444:
+ clear_val = PACK_COLOR_4444(clear[3], clear[0],
+ clear[1], clear[2]);
+ break;
+ case MESA_FORMAT_ARGB1555:
+ clear_val = PACK_COLOR_1555(clear[3], clear[0],
+ clear[1], clear[2]);
+ break;
+ case MESA_FORMAT_A8:
+ clear_val = PACK_COLOR_8888(clear[3], clear[3],
+ clear[3], clear[3]);
+ break;
+ default:
+ fail_mask |= 1 << buf;
+ continue;
+ }
+ }
+
+ BR13 |= br13_for_cpp(cpp);
+
+ assert(x1 < x2);
+ assert(y1 < y2);
+
+ /* do space check before going any further */
+ aper_array[0] = intel->batch.bo;
+ aper_array[1] = region->bo;
+
+ if (drm_intel_bufmgr_check_aperture_space(aper_array,
+ ARRAY_SIZE(aper_array)) != 0) {
+ intel_batchbuffer_flush(intel);
+ }
+
+ BEGIN_BATCH_BLT(6);
+ OUT_BATCH(CMD);
+ OUT_BATCH(BR13);
+ OUT_BATCH((y1 << 16) | x1);
+ OUT_BATCH((y2 << 16) | x2);
+ OUT_RELOC_FENCED(region->bo,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
+ 0);
+ OUT_BATCH(clear_val);
+ ADVANCE_BATCH();
+
+ if (intel->always_flush_cache)
+ intel_batchbuffer_emit_mi_flush(intel);
+
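+ /* clear_depth_value/mask already account for both planes, so one blit covers depth and stencil together. */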
+ if (buf == BUFFER_DEPTH || buf == BUFFER_STENCIL)
+ mask &= ~(BUFFER_BIT_DEPTH | BUFFER_BIT_STENCIL);
+ }
+
+ return fail_mask;
+}
+
+bool
+intelEmitImmediateColorExpandBlit(struct intel_context *intel,
+ GLuint cpp,
+ GLubyte *src_bits, GLuint src_size,
+ GLuint fg_color,
+ GLshort dst_pitch,
+ drm_intel_bo *dst_buffer,
+ GLuint dst_offset,
+ uint32_t dst_tiling,
+ GLshort x, GLshort y,
+ GLshort w, GLshort h,
+ GLenum logic_op)
+{
+ int dwords = ALIGN(src_size, 8) / 4;
+ uint32_t opcode, br13, blit_cmd;
+
+ if (dst_tiling != I915_TILING_NONE) {
+ if (dst_offset & 4095)
+ return false;
+ if (dst_tiling == I915_TILING_Y)
+ return false;
+ }
+
+ assert( logic_op - GL_CLEAR >= 0 );
+ assert( logic_op - GL_CLEAR < 0x10 );
+ assert(dst_pitch > 0);
+
+ if (w < 0 || h < 0)
+ return true;
+
+ dst_pitch *= cpp;
+
+ DBG("%s dst:buf(%p)/%d+%d %d,%d sz:%dx%d, %d bytes %d dwords\n",
+ __FUNCTION__,
+ dst_buffer, dst_pitch, dst_offset, x, y, w, h, src_size, dwords);
+
+ intel_batchbuffer_require_space(intel,
+ (8 * 4) +
+ (3 * 4) +
+ dwords * 4, true);
+
+ opcode = XY_SETUP_BLT_CMD;
+ if (cpp == 4)
+ opcode |= XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
+#ifndef I915
+ if (dst_tiling != I915_TILING_NONE) {
+ opcode |= XY_DST_TILED;
+ dst_pitch /= 4;
+ }
+#endif
+
+ br13 = dst_pitch | (translate_raster_op(logic_op) << 16) | (1 << 29);
+ br13 |= br13_for_cpp(cpp);
+
+ blit_cmd = XY_TEXT_IMMEDIATE_BLIT_CMD | XY_TEXT_BYTE_PACKED; /* packing? */
+ if (dst_tiling != I915_TILING_NONE)
+ blit_cmd |= XY_DST_TILED;
+
+ BEGIN_BATCH_BLT(8 + 3);
+ OUT_BATCH(opcode);
+ OUT_BATCH(br13);
+ OUT_BATCH((0 << 16) | 0); /* clip x1, y1 */
+ OUT_BATCH((100 << 16) | 100); /* clip x2, y2 */
+ OUT_RELOC_FENCED(dst_buffer,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
+ dst_offset);
+ OUT_BATCH(0); /* bg */
+ OUT_BATCH(fg_color); /* fg */
+ OUT_BATCH(0); /* pattern base addr */
+
+ OUT_BATCH(blit_cmd | ((3 - 2) + dwords));
+ OUT_BATCH((y << 16) | x);
+ OUT_BATCH(((y + h) << 16) | (x + w));
+ ADVANCE_BATCH();
+
+ intel_batchbuffer_data(intel, src_bits, dwords * 4, true);
+
+ intel_batchbuffer_emit_mi_flush(intel);
+
+ return true;
+}
+
+/* We don't have a memmove-type blit like some other hardware, so we'll do a
+ * rectangular blit covering a large space, then emit a 1-scanline blit at
+ * the end to cover the remainder if needed.
+ */
+void
+intel_emit_linear_blit(struct intel_context *intel,
+ drm_intel_bo *dst_bo,
+ unsigned int dst_offset,
+ drm_intel_bo *src_bo,
+ unsigned int src_offset,
+ unsigned int size)
+{
+ GLuint pitch, height;
+ bool ok;
+
+ /* The pitch given to the GPU must be DWORD aligned, and
+ * we want width to match pitch. Max width is (1 << 15) - 1,
+ * and rounding that down to the nearest DWORD gives (1 << 15) - 4.
+ */
+ pitch = ROUND_DOWN_TO(MIN2(size, (1 << 15) - 1), 4);
+ height = (pitch == 0) ? 1 : size / pitch;
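+ /* e.g. size = 100000 gives pitch = 32764, height = 3, leaving 1708 bytes for the final 1-scanline blit below. */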
+ ok = intelEmitCopyBlit(intel, 1,
+ pitch, src_bo, src_offset, I915_TILING_NONE,
+ pitch, dst_bo, dst_offset, I915_TILING_NONE,
+ 0, 0, /* src x/y */
+ 0, 0, /* dst x/y */
+ pitch, height, /* w, h */
+ GL_COPY);
+ assert(ok);
+
+ src_offset += pitch * height;
+ dst_offset += pitch * height;
+ size -= pitch * height;
+ assert(size < (1 << 15));
+ pitch = ALIGN(size, 4);
+ if (size != 0) {
+ ok = intelEmitCopyBlit(intel, 1,
+ pitch, src_bo, src_offset, I915_TILING_NONE,
+ pitch, dst_bo, dst_offset, I915_TILING_NONE,
+ 0, 0, /* src x/y */
+ 0, 0, /* dst x/y */
+ size, 1, /* w, h */
+ GL_COPY);
+ assert(ok);
+ }
+}
+
+/**
+ * Used to initialize the alpha value of an ARGB8888 teximage after
+ * loading it from an XRGB8888 source.
+ *
+ * This is very common with glCopyTexImage2D().
+ */
+void
+intel_set_teximage_alpha_to_one(struct gl_context *ctx,
+ struct intel_texture_image *intel_image)
+{
+ struct intel_context *intel = intel_context(ctx);
+ unsigned int image_x, image_y;
+ uint32_t x1, y1, x2, y2;
+ uint32_t BR13, CMD;
+ int pitch, cpp;
+ drm_intel_bo *aper_array[2];
+ struct intel_region *region = intel_image->mt->region;
+ int width, height, depth;
+ BATCH_LOCALS;
+
+ intel_miptree_get_dimensions_for_image(&intel_image->base.Base,
+ &width, &height, &depth);
+ assert(depth == 1);
+
+ assert(intel_image->base.Base.TexFormat == MESA_FORMAT_ARGB8888);
+
+ /* get dest x/y in destination texture */
+ intel_miptree_get_image_offset(intel_image->mt,
+ intel_image->base.Base.Level,
+ intel_image->base.Base.Face,
+ 0,
+ &image_x, &image_y);
+
+ x1 = image_x;
+ y1 = image_y;
+ x2 = image_x + width;
+ y2 = image_y + height;
+
+ pitch = region->pitch;
+ cpp = region->cpp;
+
+ DBG("%s dst:buf(%p)/%d %d,%d sz:%dx%d\n",
+ __FUNCTION__,
+ intel_image->mt->region->bo, (pitch * cpp),
+ x1, y1, x2 - x1, y2 - y1);
+
+ BR13 = br13_for_cpp(cpp) | 0xf0 << 16;
+ CMD = XY_COLOR_BLT_CMD;
+ CMD |= XY_BLT_WRITE_ALPHA;
+
+ assert(region->tiling != I915_TILING_Y);
+
+#ifndef I915
+ if (region->tiling != I915_TILING_NONE) {
+ CMD |= XY_DST_TILED;
+ pitch /= 4;
+ }
+#endif
+ BR13 |= (pitch * cpp);
+
+ /* do space check before going any further */
+ aper_array[0] = intel->batch.bo;
+ aper_array[1] = region->bo;
+
+ if (drm_intel_bufmgr_check_aperture_space(aper_array,
+ ARRAY_SIZE(aper_array)) != 0) {
+ intel_batchbuffer_flush(intel);
+ }
+
+ BEGIN_BATCH_BLT(6);
+ OUT_BATCH(CMD);
+ OUT_BATCH(BR13);
+ OUT_BATCH((y1 << 16) | x1);
+ OUT_BATCH((y2 << 16) | x2);
+ OUT_RELOC_FENCED(region->bo,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
+ 0);
+ OUT_BATCH(0xffffffff); /* white, but only alpha gets written */
+ ADVANCE_BATCH();
+
+ intel_batchbuffer_emit_mi_flush(intel);
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_buffer_objects.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_buffer_objects.c
index e06dd3c8d3c..df8ac7fb301 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_buffer_objects.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_buffer_objects.c
@@ -1 +1,849 @@
-../intel/intel_buffer_objects.c
\ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "main/imports.h"
+#include "main/mfeatures.h"
+#include "main/mtypes.h"
+#include "main/macros.h"
+#include "main/bufferobj.h"
+
+#include "intel_blit.h"
+#include "intel_buffer_objects.h"
+#include "intel_batchbuffer.h"
+#include "intel_context.h"
+#include "intel_fbo.h"
+#include "intel_mipmap_tree.h"
+#include "intel_regions.h"
+
+static GLboolean
+intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj);
+
+/** Allocates a new drm_intel_bo to store the data for the buffer object. */
+static void
+intel_bufferobj_alloc_buffer(struct intel_context *intel,
+ struct intel_buffer_object *intel_obj)
+{
+ intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
+ intel_obj->Base.Size, 64);
+}
+
+static void
+release_buffer(struct intel_buffer_object *intel_obj)
+{
+ drm_intel_bo_unreference(intel_obj->buffer);
+ intel_obj->buffer = NULL;
+ intel_obj->offset = 0;
+ intel_obj->source = 0;
+}
+
+/**
+ * There is some duplication between mesa's bufferobjects and our
+ * bufmgr buffers. Both have an integer handle and a hashtable to
+ * look up an opaque structure. It would be nice if the handles and
+ * internal structure were somehow shared.
+ */
+static struct gl_buffer_object *
+intel_bufferobj_alloc(struct gl_context * ctx, GLuint name, GLenum target)
+{
+ struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);
+
+ _mesa_initialize_buffer_object(ctx, &obj->Base, name, target);
+
+ obj->buffer = NULL;
+
+ return &obj->Base;
+}
+
+/**
+ * Deallocate/free a vertex/pixel buffer object.
+ * Called via glDeleteBuffersARB().
+ */
+static void
+intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
+{
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ assert(intel_obj);
+
+ /* Buffer objects are automatically unmapped when deleted according
+ * to the spec, but Mesa doesn't call UnmapBuffer for us at context
+ * destroy time (though it does if you call glDeleteBuffers).
+ */
+ if (obj->Pointer)
+ intel_bufferobj_unmap(ctx, obj);
+
+ free(intel_obj->sys_buffer);
+
+ drm_intel_bo_unreference(intel_obj->buffer);
+ free(intel_obj);
+}
+
+
+
+/**
+ * Allocate space for and store data in a buffer object. Any data that was
+ * previously stored in the buffer object is lost. If data is NULL,
+ * memory will be allocated, but no copy will occur.
+ * Called via ctx->Driver.BufferData().
+ * \return true for success, false if out of memory
+ */
+static GLboolean
+intel_bufferobj_data(struct gl_context * ctx,
+ GLenum target,
+ GLsizeiptrARB size,
+ const GLvoid * data,
+ GLenum usage, struct gl_buffer_object *obj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ /* Part of the ABI, but this function doesn't use it.
+ */
+#ifndef I915
+ (void) target;
+#endif
+
+ intel_obj->Base.Size = size;
+ intel_obj->Base.Usage = usage;
+
+ assert(!obj->Pointer); /* Mesa should have unmapped it */
+
+ if (intel_obj->buffer != NULL)
+ release_buffer(intel_obj);
+
+ free(intel_obj->sys_buffer);
+ intel_obj->sys_buffer = NULL;
+
+ if (size != 0) {
+#ifdef I915
+ /* On pre-965, stick VBOs in system memory, as we're always doing
+ * swtnl with their contents anyway.
+ */
+ if (target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER) {
+ intel_obj->sys_buffer = malloc(size);
+ if (intel_obj->sys_buffer != NULL) {
+ if (data != NULL)
+ memcpy(intel_obj->sys_buffer, data, size);
+ return true;
+ }
+ }
+#endif
+ intel_bufferobj_alloc_buffer(intel, intel_obj);
+ if (!intel_obj->buffer)
+ return false;
+
+ if (data != NULL)
+ drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
+ }
+
+ return true;
+}
+
+
+/**
+ * Replace data in a subrange of a buffer object. If the data range
+ * specified by size + offset extends beyond the end of the buffer or
+ * if data is NULL, no copy is performed.
+ * Called via glBufferSubDataARB().
+ */
+static void
+intel_bufferobj_subdata(struct gl_context * ctx,
+ GLintptrARB offset,
+ GLsizeiptrARB size,
+ const GLvoid * data, struct gl_buffer_object *obj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+ bool busy;
+
+ if (size == 0)
+ return;
+
+ assert(intel_obj);
+
+ /* If we have a single copy in system memory, update that */
+ if (intel_obj->sys_buffer) {
+ if (intel_obj->source)
+ release_buffer(intel_obj);
+
+ if (intel_obj->buffer == NULL) {
+ memcpy((char *)intel_obj->sys_buffer + offset, data, size);
+ return;
+ }
+
+ free(intel_obj->sys_buffer);
+ intel_obj->sys_buffer = NULL;
+ }
+
+ /* Otherwise we need to update the copy in video memory. */
+ busy =
+ drm_intel_bo_busy(intel_obj->buffer) ||
+ drm_intel_bo_references(intel->batch.bo, intel_obj->buffer);
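+ /* A buffer referenced by the unflushed batch counts as busy, since flushing would make it so. */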
+
+ if (busy) {
+ if (size == intel_obj->Base.Size) {
+ /* Replace the current busy bo with fresh data. */
+ drm_intel_bo_unreference(intel_obj->buffer);
+ intel_bufferobj_alloc_buffer(intel, intel_obj);
+ drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
+ } else {
+ perf_debug("Using a blit copy to avoid stalling on glBufferSubData() "
+ "to a busy buffer object.\n");
+ drm_intel_bo *temp_bo =
+ drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);
+
+ drm_intel_bo_subdata(temp_bo, 0, size, data);
+
+ intel_emit_linear_blit(intel,
+ intel_obj->buffer, offset,
+ temp_bo, 0,
+ size);
+
+ drm_intel_bo_unreference(temp_bo);
+ }
+ } else {
+ if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
+ if (drm_intel_bo_busy(intel_obj->buffer)) {
+ perf_debug("Stalling on the GPU in glBufferSubData().\n");
+ }
+ }
+ drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
+ }
+}
+
+
+/**
+ * Called via glGetBufferSubDataARB().
+ */
+static void
+intel_bufferobj_get_subdata(struct gl_context * ctx,
+ GLintptrARB offset,
+ GLsizeiptrARB size,
+ GLvoid * data, struct gl_buffer_object *obj)
+{
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+ struct intel_context *intel = intel_context(ctx);
+
+ assert(intel_obj);
+ if (intel_obj->sys_buffer)
+ memcpy(data, (char *)intel_obj->sys_buffer + offset, size);
+ else {
+ if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
+ intel_batchbuffer_flush(intel);
+ }
+ drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
+ }
+}
+
+
+
+/**
+ * Called via glMapBufferRange and glMapBuffer
+ *
+ * The goal of this extension is to let apps accumulate new contents in a
+ * buffer object at the same time as they accumulate rendering that uses
+ * it. Without it, you'd end up blocking on execution of the pending
+ * rendering every time you mapped the buffer to put new data in.
+ *
+ * We support it in 3 ways: If unsynchronized, then don't bother
+ * flushing the batchbuffer before mapping the buffer, which can save blocking
+ * in many cases. If we would still block, and they allow the whole buffer
+ * to be invalidated, then just allocate a new buffer to replace the old one.
+ * If not, and we'd block, and they allow the subrange of the buffer to be
+ * invalidated, then we can make a new little BO, let them write into that,
+ * and blit it into the real BO at unmap time.
+ */
+static void *
+intel_bufferobj_map_range(struct gl_context * ctx,
+ GLintptr offset, GLsizeiptr length,
+ GLbitfield access, struct gl_buffer_object *obj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ assert(intel_obj);
+
+ /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
+ * internally uses our functions directly.
+ */
+ obj->Offset = offset;
+ obj->Length = length;
+ obj->AccessFlags = access;
+
+ if (intel_obj->sys_buffer) {
+ const bool read_only =
+ (access & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT)) == GL_MAP_READ_BIT;
+
+ if (!read_only && intel_obj->source)
+ release_buffer(intel_obj);
+
+ if (!intel_obj->buffer || intel_obj->source) {
+ obj->Pointer = (char *) intel_obj->sys_buffer + offset;
+ return obj->Pointer;
+ }
+
+ free(intel_obj->sys_buffer);
+ intel_obj->sys_buffer = NULL;
+ }
+
+ if (intel_obj->buffer == NULL) {
+ obj->Pointer = NULL;
+ return NULL;
+ }
+
+ /* If the access is synchronized (like a normal buffer mapping), then get
+ * things flushed out so the later mapping syncs appropriately through GEM.
+ * If the user doesn't care about existing buffer contents and mapping would
+ * cause us to block, then throw out the old buffer.
+ *
+ * If they set INVALIDATE_BUFFER, we can pitch the current contents to
+ * achieve the required synchronization.
+ */
+ if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
+ if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
+ if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
+ drm_intel_bo_unreference(intel_obj->buffer);
+ intel_bufferobj_alloc_buffer(intel, intel_obj);
+ } else {
+ intel_flush(ctx);
+ }
+ } else if (drm_intel_bo_busy(intel_obj->buffer) &&
+ (access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
+ drm_intel_bo_unreference(intel_obj->buffer);
+ intel_bufferobj_alloc_buffer(intel, intel_obj);
+ }
+ }
+
+ /* If the user is mapping a range of an active buffer object but
+ * doesn't require the current contents of that range, make a new
+ * BO, and we'll copy what they put in there out at unmap or
+ * FlushRange time.
+ */
+ if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
+ drm_intel_bo_busy(intel_obj->buffer)) {
+ if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
+ intel_obj->range_map_buffer = malloc(length);
+ obj->Pointer = intel_obj->range_map_buffer;
+ } else {
+ intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
+ "range map",
+ length, 64);
+ if (!(access & GL_MAP_READ_BIT)) {
+ drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
+ } else {
+ drm_intel_bo_map(intel_obj->range_map_bo,
+ (access & GL_MAP_WRITE_BIT) != 0);
+ }
+ obj->Pointer = intel_obj->range_map_bo->virtual;
+ }
+ return obj->Pointer;
+ }
+
+ if (access & GL_MAP_UNSYNCHRONIZED_BIT)
+ drm_intel_gem_bo_map_unsynchronized(intel_obj->buffer);
+ else if (!(access & GL_MAP_READ_BIT)) {
+ drm_intel_gem_bo_map_gtt(intel_obj->buffer);
+ } else {
+ drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
+ }
+
+ obj->Pointer = (char *) intel_obj->buffer->virtual + offset;
+ return obj->Pointer;
+}
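+
+/* Example (hypothetical application code) hitting the three strategies
+ * described above -- unsynchronized writes, whole-buffer invalidation, and
+ * range invalidation into a temporary BO:
+ *
+ *    glMapBufferRange(GL_ARRAY_BUFFER, 0, size,
+ *                     GL_MAP_WRITE_BIT | GL_MAP_UNSYNCHRONIZED_BIT);
+ *    glMapBufferRange(GL_ARRAY_BUFFER, 0, size,
+ *                     GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT);
+ *    glMapBufferRange(GL_ARRAY_BUFFER, offset, length,
+ *                     GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT);
+ */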
+
+/* Ideally we'd use a BO to avoid taking up cache space for the temporary
+ * data, but FlushMappedBufferRange may be followed by further writes to
+ * the pointer, so we would have to re-map after emitting our blit, which
+ * would defeat the point.
+ */
+static void
+intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
+ GLintptr offset, GLsizeiptr length,
+ struct gl_buffer_object *obj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+ drm_intel_bo *temp_bo;
+
+ /* Unless we're in the range map using a temporary system buffer,
+ * there's no work to do.
+ */
+ if (intel_obj->range_map_buffer == NULL)
+ return;
+
+ if (length == 0)
+ return;
+
+ temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);
+
+ drm_intel_bo_subdata(temp_bo, 0, length, intel_obj->range_map_buffer);
+
+ intel_emit_linear_blit(intel,
+ intel_obj->buffer, obj->Offset + offset,
+ temp_bo, 0,
+ length);
+
+ drm_intel_bo_unreference(temp_bo);
+}
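+
+/* A minimal (hypothetical) application sequence that reaches the function
+ * above, assuming the buffer was busy so the malloc'ed range map was used:
+ *
+ *    ptr = glMapBufferRange(GL_ARRAY_BUFFER, 0, size,
+ *                           GL_MAP_WRITE_BIT |
+ *                           GL_MAP_INVALIDATE_RANGE_BIT |
+ *                           GL_MAP_FLUSH_EXPLICIT_BIT);
+ *    memcpy(ptr, data, dirty_size);
+ *    glFlushMappedBufferRange(GL_ARRAY_BUFFER, 0, dirty_size);
+ *    glUnmapBuffer(GL_ARRAY_BUFFER);
+ */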
+
+
+/**
+ * Called via glUnmapBuffer().
+ */
+static GLboolean
+intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ assert(intel_obj);
+ assert(obj->Pointer);
+ if (intel_obj->sys_buffer != NULL) {
+ /* always keep the mapping around. */
+ } else if (intel_obj->range_map_buffer != NULL) {
+ /* Since we've emitted some blits to buffers that will (likely) be used
+ * in rendering operations in other cache domains in this batch, emit a
+ * flush. Once again, we wish for a domain tracker in libdrm to cover
+ * usage inside of a batchbuffer.
+ */
+ intel_batchbuffer_emit_mi_flush(intel);
+ free(intel_obj->range_map_buffer);
+ intel_obj->range_map_buffer = NULL;
+ } else if (intel_obj->range_map_bo != NULL) {
+ drm_intel_bo_unmap(intel_obj->range_map_bo);
+
+ intel_emit_linear_blit(intel,
+ intel_obj->buffer, obj->Offset,
+ intel_obj->range_map_bo, 0,
+ obj->Length);
+
+ /* Since we've emitted some blits to buffers that will (likely) be used
+ * in rendering operations in other cache domains in this batch, emit a
+ * flush. Once again, we wish for a domain tracker in libdrm to cover
+ * usage inside of a batchbuffer.
+ */
+ intel_batchbuffer_emit_mi_flush(intel);
+
+ drm_intel_bo_unreference(intel_obj->range_map_bo);
+ intel_obj->range_map_bo = NULL;
+ } else if (intel_obj->buffer != NULL) {
+ drm_intel_bo_unmap(intel_obj->buffer);
+ }
+ obj->Pointer = NULL;
+ obj->Offset = 0;
+ obj->Length = 0;
+
+ return true;
+}
+
+drm_intel_bo *
+intel_bufferobj_buffer(struct intel_context *intel,
+ struct intel_buffer_object *intel_obj,
+ GLuint flag)
+{
+ if (intel_obj->source)
+ release_buffer(intel_obj);
+
+ if (intel_obj->buffer == NULL) {
+ intel_bufferobj_alloc_buffer(intel, intel_obj);
+ drm_intel_bo_subdata(intel_obj->buffer,
+ 0, intel_obj->Base.Size,
+ intel_obj->sys_buffer);
+
+ free(intel_obj->sys_buffer);
+ intel_obj->sys_buffer = NULL;
+ intel_obj->offset = 0;
+ }
+
+ return intel_obj->buffer;
+}
+
+#define INTEL_UPLOAD_SIZE (64*1024)
+
+void
+intel_upload_finish(struct intel_context *intel)
+{
+ if (!intel->upload.bo)
+ return;
+
+ if (intel->upload.buffer_len) {
+ drm_intel_bo_subdata(intel->upload.bo,
+ intel->upload.buffer_offset,
+ intel->upload.buffer_len,
+ intel->upload.buffer);
+ intel->upload.buffer_len = 0;
+ }
+
+ drm_intel_bo_unreference(intel->upload.bo);
+ intel->upload.bo = NULL;
+}
+
+static void wrap_buffers(struct intel_context *intel, GLuint size)
+{
+ intel_upload_finish(intel);
+
+ if (size < INTEL_UPLOAD_SIZE)
+ size = INTEL_UPLOAD_SIZE;
+
+ intel->upload.bo = drm_intel_bo_alloc(intel->bufmgr, "upload", size, 0);
+ intel->upload.offset = 0;
+}
+
+void intel_upload_data(struct intel_context *intel,
+ const void *ptr, GLuint size, GLuint align,
+ drm_intel_bo **return_bo,
+ GLuint *return_offset)
+{
+ GLuint base, delta;
+
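+ /* Round the current upload offset up to the caller's alignment; e.g.
+ * offset == 100 with align == 64 gives base == 128.
+ */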
+ base = (intel->upload.offset + align - 1) / align * align;
+ if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
+ wrap_buffers(intel, size);
+ base = 0;
+ }
+
+ drm_intel_bo_reference(intel->upload.bo);
+ *return_bo = intel->upload.bo;
+ *return_offset = base;
+
+ delta = base - intel->upload.offset;
+ if (intel->upload.buffer_len &&
+ intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer))
+ {
+ drm_intel_bo_subdata(intel->upload.bo,
+ intel->upload.buffer_offset,
+ intel->upload.buffer_len,
+ intel->upload.buffer);
+ intel->upload.buffer_len = 0;
+ }
+
+ if (size < sizeof(intel->upload.buffer))
+ {
+ if (intel->upload.buffer_len == 0)
+ intel->upload.buffer_offset = base;
+ else
+ intel->upload.buffer_len += delta;
+
+ memcpy(intel->upload.buffer + intel->upload.buffer_len, ptr, size);
+ intel->upload.buffer_len += size;
+ }
+ else
+ {
+ drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
+ }
+
+ intel->upload.offset = base + size;
+}
+
+void *intel_upload_map(struct intel_context *intel, GLuint size, GLuint align)
+{
+ GLuint base, delta;
+ char *ptr;
+
+ base = (intel->upload.offset + align - 1) / align * align;
+ if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
+ wrap_buffers(intel, size);
+ base = 0;
+ }
+
+ delta = base - intel->upload.offset;
+ if (intel->upload.buffer_len &&
+ intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer))
+ {
+ drm_intel_bo_subdata(intel->upload.bo,
+ intel->upload.buffer_offset,
+ intel->upload.buffer_len,
+ intel->upload.buffer);
+ intel->upload.buffer_len = 0;
+ }
+
+ if (size <= sizeof(intel->upload.buffer)) {
+ if (intel->upload.buffer_len == 0)
+ intel->upload.buffer_offset = base;
+ else
+ intel->upload.buffer_len += delta;
+
+ ptr = intel->upload.buffer + intel->upload.buffer_len;
+ intel->upload.buffer_len += size;
+ } else
+ ptr = malloc(size);
+
+ return ptr;
+}
+
+void intel_upload_unmap(struct intel_context *intel,
+ const void *ptr, GLuint size, GLuint align,
+ drm_intel_bo **return_bo,
+ GLuint *return_offset)
+{
+ GLuint base;
+
+ base = (intel->upload.offset + align - 1) / align * align;
+ if (size > sizeof(intel->upload.buffer)) {
+ drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
+ free((void*)ptr);
+ }
+
+ drm_intel_bo_reference(intel->upload.bo);
+ *return_bo = intel->upload.bo;
+ *return_offset = base;
+
+ intel->upload.offset = base + size;
+}
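+
+/* A minimal sketch (hypothetical caller) of the map/unmap pair above,
+ * streaming vertex data through the shared upload BO:
+ *
+ *    void *p = intel_upload_map(intel, size, 64);
+ *    memcpy(p, vertices, size);
+ *    intel_upload_unmap(intel, p, size, 64, &vb_bo, &vb_offset);
+ *    ... point hardware state at (vb_bo, vb_offset), then drop the ref ...
+ *    drm_intel_bo_unreference(vb_bo);
+ */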
+
+drm_intel_bo *
+intel_bufferobj_source(struct intel_context *intel,
+ struct intel_buffer_object *intel_obj,
+ GLuint align, GLuint *offset)
+{
+ if (intel_obj->buffer == NULL) {
+ intel_upload_data(intel,
+ intel_obj->sys_buffer, intel_obj->Base.Size, align,
+ &intel_obj->buffer, &intel_obj->offset);
+ intel_obj->source = 1;
+ }
+
+ *offset = intel_obj->offset;
+ return intel_obj->buffer;
+}
+
+static void
+intel_bufferobj_copy_subdata(struct gl_context *ctx,
+ struct gl_buffer_object *src,
+ struct gl_buffer_object *dst,
+ GLintptr read_offset, GLintptr write_offset,
+ GLsizeiptr size)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_src = intel_buffer_object(src);
+ struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
+ drm_intel_bo *src_bo, *dst_bo;
+ GLuint src_offset;
+
+ if (size == 0)
+ return;
+
+ /* If we're in system memory, just map and memcpy. */
+ if (intel_src->sys_buffer || intel_dst->sys_buffer) {
+ /* The same buffer may be used, but note that regions copied may
+ * not overlap.
+ */
+ if (src == dst) {
+ char *ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
+ GL_MAP_READ_BIT |
+ GL_MAP_WRITE_BIT,
+ dst);
+ memmove(ptr + write_offset, ptr + read_offset, size);
+ intel_bufferobj_unmap(ctx, dst);
+ } else {
+ const char *src_ptr;
+ char *dst_ptr;
+
+ src_ptr = intel_bufferobj_map_range(ctx, 0, src->Size,
+ GL_MAP_READ_BIT, src);
+ dst_ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
+ GL_MAP_WRITE_BIT, dst);
+
+ memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);
+
+ intel_bufferobj_unmap(ctx, src);
+ intel_bufferobj_unmap(ctx, dst);
+ }
+ return;
+ }
+
+ /* Otherwise, we have real BOs, so blit them. */
+
+ dst_bo = intel_bufferobj_buffer(intel, intel_dst, INTEL_WRITE_PART);
+ src_bo = intel_bufferobj_source(intel, intel_src, 64, &src_offset);
+
+ intel_emit_linear_blit(intel,
+ dst_bo, write_offset,
+ src_bo, read_offset + src_offset, size);
+
+ /* Since we've emitted some blits to buffers that will (likely) be used
+ * in rendering operations in other cache domains in this batch, emit a
+ * flush. Once again, we wish for a domain tracker in libdrm to cover
+ * usage inside of a batchbuffer.
+ */
+ intel_batchbuffer_emit_mi_flush(intel);
+}
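+
+/* Example (hypothetical application code): a copy that takes the blit path
+ * above when both objects are backed by real BOs:
+ *
+ *    glBindBuffer(GL_COPY_READ_BUFFER, src_buf);
+ *    glBindBuffer(GL_COPY_WRITE_BUFFER, dst_buf);
+ *    glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER,
+ *                        read_offset, write_offset, size);
+ */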
+
+#if FEATURE_APPLE_object_purgeable
+static GLenum
+intel_buffer_purgeable(drm_intel_bo *buffer)
+{
+ int retained = 0;
+
+ if (buffer != NULL)
+ retained = drm_intel_bo_madvise (buffer, I915_MADV_DONTNEED);
+
+ return retained ? GL_VOLATILE_APPLE : GL_RELEASED_APPLE;
+}
+
+static GLenum
+intel_buffer_object_purgeable(struct gl_context * ctx,
+ struct gl_buffer_object *obj,
+ GLenum option)
+{
+ struct intel_buffer_object *intel_obj = intel_buffer_object (obj);
+
+ if (intel_obj->buffer != NULL)
+ return intel_buffer_purgeable(intel_obj->buffer);
+
+ if (option == GL_RELEASED_APPLE) {
+ if (intel_obj->sys_buffer != NULL) {
+ free(intel_obj->sys_buffer);
+ intel_obj->sys_buffer = NULL;
+ }
+
+ return GL_RELEASED_APPLE;
+ } else {
+ /* XXX Create the buffer and madvise(MADV_DONTNEED)? */
+ struct intel_context *intel = intel_context(ctx);
+ drm_intel_bo *bo = intel_bufferobj_buffer(intel, intel_obj, INTEL_READ);
+
+ return intel_buffer_purgeable(bo);
+ }
+}
+
+static GLenum
+intel_texture_object_purgeable(struct gl_context * ctx,
+ struct gl_texture_object *obj,
+ GLenum option)
+{
+ struct intel_texture_object *intel;
+
+ (void) ctx;
+ (void) option;
+
+ intel = intel_texture_object(obj);
+ if (intel->mt == NULL || intel->mt->region == NULL)
+ return GL_RELEASED_APPLE;
+
+ return intel_buffer_purgeable(intel->mt->region->bo);
+}
+
+static GLenum
+intel_render_object_purgeable(struct gl_context * ctx,
+ struct gl_renderbuffer *obj,
+ GLenum option)
+{
+ struct intel_renderbuffer *intel;
+
+ (void) ctx;
+ (void) option;
+
+ intel = intel_renderbuffer(obj);
+ if (intel->mt == NULL || intel->mt->region == NULL)
+ return GL_RELEASED_APPLE;
+
+ return intel_buffer_purgeable(intel->mt->region->bo);
+}
+
+static GLenum
+intel_buffer_unpurgeable(drm_intel_bo *buffer)
+{
+ int retained;
+
+ retained = 0;
+ if (buffer != NULL)
+ retained = drm_intel_bo_madvise (buffer, I915_MADV_WILLNEED);
+
+ return retained ? GL_RETAINED_APPLE : GL_UNDEFINED_APPLE;
+}
+
+static GLenum
+intel_buffer_object_unpurgeable(struct gl_context * ctx,
+ struct gl_buffer_object *obj,
+ GLenum option)
+{
+ (void) ctx;
+ (void) option;
+
+ return intel_buffer_unpurgeable(intel_buffer_object (obj)->buffer);
+}
+
+static GLenum
+intel_texture_object_unpurgeable(struct gl_context * ctx,
+ struct gl_texture_object *obj,
+ GLenum option)
+{
+ struct intel_texture_object *intel;
+
+ (void) ctx;
+ (void) option;
+
+ intel = intel_texture_object(obj);
+ if (intel->mt == NULL || intel->mt->region == NULL)
+ return GL_UNDEFINED_APPLE;
+
+ return intel_buffer_unpurgeable(intel->mt->region->bo);
+}
+
+static GLenum
+intel_render_object_unpurgeable(struct gl_context * ctx,
+ struct gl_renderbuffer *obj,
+ GLenum option)
+{
+ struct intel_renderbuffer *intel;
+
+ (void) ctx;
+ (void) option;
+
+ intel = intel_renderbuffer(obj);
+ if (intel->mt == NULL || intel->mt->region == NULL)
+ return GL_UNDEFINED_APPLE;
+
+ return intel_buffer_unpurgeable(intel->mt->region->bo);
+}
+#endif
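+
+/* Example (hypothetical application code) exercising the hooks above through
+ * GL_APPLE_object_purgeable:
+ *
+ *    glObjectPurgeableAPPLE(GL_BUFFER_OBJECT_APPLE, buf, GL_VOLATILE_APPLE);
+ *    ...
+ *    if (glObjectUnpurgeableAPPLE(GL_BUFFER_OBJECT_APPLE, buf,
+ *                                 GL_RETAINED_APPLE) == GL_UNDEFINED_APPLE)
+ *       (contents were discarded; the app must respecify them)
+ */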
+
+void
+intelInitBufferObjectFuncs(struct dd_function_table *functions)
+{
+ functions->NewBufferObject = intel_bufferobj_alloc;
+ functions->DeleteBuffer = intel_bufferobj_free;
+ functions->BufferData = intel_bufferobj_data;
+ functions->BufferSubData = intel_bufferobj_subdata;
+ functions->GetBufferSubData = intel_bufferobj_get_subdata;
+ functions->MapBufferRange = intel_bufferobj_map_range;
+ functions->FlushMappedBufferRange = intel_bufferobj_flush_mapped_range;
+ functions->UnmapBuffer = intel_bufferobj_unmap;
+ functions->CopyBufferSubData = intel_bufferobj_copy_subdata;
+
+#if FEATURE_APPLE_object_purgeable
+ functions->BufferObjectPurgeable = intel_buffer_object_purgeable;
+ functions->TextureObjectPurgeable = intel_texture_object_purgeable;
+ functions->RenderObjectPurgeable = intel_render_object_purgeable;
+
+ functions->BufferObjectUnpurgeable = intel_buffer_object_unpurgeable;
+ functions->TextureObjectUnpurgeable = intel_texture_object_unpurgeable;
+ functions->RenderObjectUnpurgeable = intel_render_object_unpurgeable;
+#endif
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_buffers.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_buffers.c
index c86daa49f47..9a9a259c9e5 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_buffers.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_buffers.c
@@ -1 +1,132 @@
-../intel/intel_buffers.c
\ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "intel_context.h"
+#include "intel_buffers.h"
+#include "intel_fbo.h"
+#include "intel_mipmap_tree.h"
+
+#include "main/fbobject.h"
+#include "main/framebuffer.h"
+#include "main/renderbuffer.h"
+
+/**
+ * Return pointer to current color drawing region, or NULL.
+ */
+struct intel_region *
+intel_drawbuf_region(struct intel_context *intel)
+{
+ struct intel_renderbuffer *irbColor =
+ intel_renderbuffer(intel->ctx.DrawBuffer->_ColorDrawBuffers[0]);
+ if (irbColor && irbColor->mt)
+ return irbColor->mt->region;
+ else
+ return NULL;
+}
+
+/**
+ * Return pointer to current color reading region, or NULL.
+ */
+struct intel_region *
+intel_readbuf_region(struct intel_context *intel)
+{
+ struct intel_renderbuffer *irb
+ = intel_renderbuffer(intel->ctx.ReadBuffer->_ColorReadBuffer);
+ if (irb && irb->mt)
+ return irb->mt->region;
+ else
+ return NULL;
+}
+
+/**
+ * Check if we're about to draw into the front color buffer.
+ * If so, set the intel->front_buffer_dirty field to true.
+ */
+void
+intel_check_front_buffer_rendering(struct intel_context *intel)
+{
+ const struct gl_framebuffer *fb = intel->ctx.DrawBuffer;
+ if (_mesa_is_winsys_fbo(fb)) {
+ /* drawing to window system buffer */
+ if (fb->_NumColorDrawBuffers > 0) {
+ if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
+ intel->front_buffer_dirty = true;
+ }
+ }
+ }
+}
+
+static void
+intelDrawBuffer(struct gl_context * ctx, GLenum mode)
+{
+ if (ctx->DrawBuffer && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
+ struct intel_context *const intel = intel_context(ctx);
+ const bool was_front_buffer_rendering =
+ intel->is_front_buffer_rendering;
+
+ intel->is_front_buffer_rendering = (mode == GL_FRONT_LEFT)
+ || (mode == GL_FRONT) || (mode == GL_FRONT_AND_BACK);
+
+ /* If we weren't front-buffer rendering before but we are now,
+ * invalidate our DRI drawable so we'll ask for new buffers
+ * (including the fake front) before we start rendering again.
+ */
+ if (!was_front_buffer_rendering && intel->is_front_buffer_rendering)
+ dri2InvalidateDrawable(intel->driContext->driDrawablePriv);
+ }
+
+ intel_draw_buffer(ctx);
+}
+
+
+static void
+intelReadBuffer(struct gl_context * ctx, GLenum mode)
+{
+ if (ctx->DrawBuffer && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
+ struct intel_context *const intel = intel_context(ctx);
+ const bool was_front_buffer_reading =
+ intel->is_front_buffer_reading;
+
+ intel->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
+ || (mode == GL_FRONT);
+
+ /* If we weren't front-buffer reading before but we are now,
+ * invalidate our DRI drawable so we'll ask for new buffers
+ * (including the fake front) before we start reading again.
+ */
+ if (!was_front_buffer_reading && intel->is_front_buffer_reading)
+ dri2InvalidateDrawable(intel->driContext->driReadablePriv);
+ }
+}
+
+
+void
+intelInitBufferFuncs(struct dd_function_table *functions)
+{
+ functions->DrawBuffer = intelDrawBuffer;
+ functions->ReadBuffer = intelReadBuffer;
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_context.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_context.c
index 27a1cbb255e..25334da33f7 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_context.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_context.c
@@ -1 +1,1030 @@
-../intel/intel_context.c
\ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "main/glheader.h"
+#include "main/context.h"
+#include "main/extensions.h"
+#include "main/fbobject.h"
+#include "main/framebuffer.h"
+#include "main/imports.h"
+#include "main/points.h"
+#include "main/renderbuffer.h"
+
+#include "swrast/swrast.h"
+#include "swrast_setup/swrast_setup.h"
+#include "tnl/tnl.h"
+#include "drivers/common/driverfuncs.h"
+#include "drivers/common/meta.h"
+
+#include "intel_chipset.h"
+#include "intel_buffers.h"
+#include "intel_tex.h"
+#include "intel_batchbuffer.h"
+#include "intel_clear.h"
+#include "intel_extensions.h"
+#include "intel_pixel.h"
+#include "intel_regions.h"
+#include "intel_buffer_objects.h"
+#include "intel_fbo.h"
+#include "intel_bufmgr.h"
+#include "intel_screen.h"
+#include "intel_mipmap_tree.h"
+
+#include "utils.h"
+#include "../glsl/ralloc.h"
+
+#ifndef INTEL_DEBUG
+int INTEL_DEBUG = (0);
+#endif
+
+
+static const GLubyte *
+intelGetString(struct gl_context * ctx, GLenum name)
+{
+ const struct intel_context *const intel = intel_context(ctx);
+ const char *chipset;
+ static char buffer[128];
+
+ switch (name) {
+ case GL_VENDOR:
+ return (GLubyte *) "Intel Open Source Technology Center";
+
+ case GL_RENDERER:
+ switch (intel->intelScreen->deviceID) {
+ case PCI_CHIP_845_G:
+ chipset = "Intel(R) 845G";
+ break;
+ case PCI_CHIP_I830_M:
+ chipset = "Intel(R) 830M";
+ break;
+ case PCI_CHIP_I855_GM:
+ chipset = "Intel(R) 852GM/855GM";
+ break;
+ case PCI_CHIP_I865_G:
+ chipset = "Intel(R) 865G";
+ break;
+ case PCI_CHIP_I915_G:
+ chipset = "Intel(R) 915G";
+ break;
+ case PCI_CHIP_E7221_G:
+ chipset = "Intel (R) E7221G (i915)";
+ break;
+ case PCI_CHIP_I915_GM:
+ chipset = "Intel(R) 915GM";
+ break;
+ case PCI_CHIP_I945_G:
+ chipset = "Intel(R) 945G";
+ break;
+ case PCI_CHIP_I945_GM:
+ chipset = "Intel(R) 945GM";
+ break;
+ case PCI_CHIP_I945_GME:
+ chipset = "Intel(R) 945GME";
+ break;
+ case PCI_CHIP_G33_G:
+ chipset = "Intel(R) G33";
+ break;
+ case PCI_CHIP_Q35_G:
+ chipset = "Intel(R) Q35";
+ break;
+ case PCI_CHIP_Q33_G:
+ chipset = "Intel(R) Q33";
+ break;
+ case PCI_CHIP_IGD_GM:
+ case PCI_CHIP_IGD_G:
+ chipset = "Intel(R) IGD";
+ break;
+ case PCI_CHIP_I965_Q:
+ chipset = "Intel(R) 965Q";
+ break;
+ case PCI_CHIP_I965_G:
+ case PCI_CHIP_I965_G_1:
+ chipset = "Intel(R) 965G";
+ break;
+ case PCI_CHIP_I946_GZ:
+ chipset = "Intel(R) 946GZ";
+ break;
+ case PCI_CHIP_I965_GM:
+ chipset = "Intel(R) 965GM";
+ break;
+ case PCI_CHIP_I965_GME:
+ chipset = "Intel(R) 965GME/GLE";
+ break;
+ case PCI_CHIP_GM45_GM:
+ chipset = "Mobile Intel® GM45 Express Chipset";
+ break;
+ case PCI_CHIP_IGD_E_G:
+ chipset = "Intel(R) Integrated Graphics Device";
+ break;
+ case PCI_CHIP_G45_G:
+ chipset = "Intel(R) G45/G43";
+ break;
+ case PCI_CHIP_Q45_G:
+ chipset = "Intel(R) Q45/Q43";
+ break;
+ case PCI_CHIP_G41_G:
+ chipset = "Intel(R) G41";
+ break;
+ case PCI_CHIP_B43_G:
+ case PCI_CHIP_B43_G1:
+ chipset = "Intel(R) B43";
+ break;
+ case PCI_CHIP_ILD_G:
+ chipset = "Intel(R) Ironlake Desktop";
+ break;
+ case PCI_CHIP_ILM_G:
+ chipset = "Intel(R) Ironlake Mobile";
+ break;
+ case PCI_CHIP_SANDYBRIDGE_GT1:
+ case PCI_CHIP_SANDYBRIDGE_GT2:
+ case PCI_CHIP_SANDYBRIDGE_GT2_PLUS:
+ chipset = "Intel(R) Sandybridge Desktop";
+ break;
+ case PCI_CHIP_SANDYBRIDGE_M_GT1:
+ case PCI_CHIP_SANDYBRIDGE_M_GT2:
+ case PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS:
+ chipset = "Intel(R) Sandybridge Mobile";
+ break;
+ case PCI_CHIP_SANDYBRIDGE_S:
+ chipset = "Intel(R) Sandybridge Server";
+ break;
+ case PCI_CHIP_IVYBRIDGE_GT1:
+ case PCI_CHIP_IVYBRIDGE_GT2:
+ chipset = "Intel(R) Ivybridge Desktop";
+ break;
+ case PCI_CHIP_IVYBRIDGE_M_GT1:
+ case PCI_CHIP_IVYBRIDGE_M_GT2:
+ chipset = "Intel(R) Ivybridge Mobile";
+ break;
+ case PCI_CHIP_IVYBRIDGE_S_GT1:
+ case PCI_CHIP_IVYBRIDGE_S_GT2:
+ chipset = "Intel(R) Ivybridge Server";
+ break;
+ case PCI_CHIP_HASWELL_GT1:
+ case PCI_CHIP_HASWELL_GT2:
+ case PCI_CHIP_HASWELL_GT2_PLUS:
+ case PCI_CHIP_HASWELL_SDV_GT1:
+ case PCI_CHIP_HASWELL_SDV_GT2:
+ case PCI_CHIP_HASWELL_SDV_GT2_PLUS:
+ case PCI_CHIP_HASWELL_ULT_GT1:
+ case PCI_CHIP_HASWELL_ULT_GT2:
+ case PCI_CHIP_HASWELL_ULT_GT2_PLUS:
+ case PCI_CHIP_HASWELL_CRW_GT1:
+ case PCI_CHIP_HASWELL_CRW_GT2:
+ case PCI_CHIP_HASWELL_CRW_GT2_PLUS:
+ chipset = "Intel(R) Haswell Desktop";
+ break;
+ case PCI_CHIP_HASWELL_M_GT1:
+ case PCI_CHIP_HASWELL_M_GT2:
+ case PCI_CHIP_HASWELL_M_GT2_PLUS:
+ case PCI_CHIP_HASWELL_SDV_M_GT1:
+ case PCI_CHIP_HASWELL_SDV_M_GT2:
+ case PCI_CHIP_HASWELL_SDV_M_GT2_PLUS:
+ case PCI_CHIP_HASWELL_ULT_M_GT1:
+ case PCI_CHIP_HASWELL_ULT_M_GT2:
+ case PCI_CHIP_HASWELL_ULT_M_GT2_PLUS:
+ case PCI_CHIP_HASWELL_CRW_M_GT1:
+ case PCI_CHIP_HASWELL_CRW_M_GT2:
+ case PCI_CHIP_HASWELL_CRW_M_GT2_PLUS:
+ chipset = "Intel(R) Haswell Mobile";
+ break;
+ case PCI_CHIP_HASWELL_S_GT1:
+ case PCI_CHIP_HASWELL_S_GT2:
+ case PCI_CHIP_HASWELL_S_GT2_PLUS:
+ case PCI_CHIP_HASWELL_SDV_S_GT1:
+ case PCI_CHIP_HASWELL_SDV_S_GT2:
+ case PCI_CHIP_HASWELL_SDV_S_GT2_PLUS:
+ case PCI_CHIP_HASWELL_ULT_S_GT1:
+ case PCI_CHIP_HASWELL_ULT_S_GT2:
+ case PCI_CHIP_HASWELL_ULT_S_GT2_PLUS:
+ case PCI_CHIP_HASWELL_CRW_S_GT1:
+ case PCI_CHIP_HASWELL_CRW_S_GT2:
+ case PCI_CHIP_HASWELL_CRW_S_GT2_PLUS:
+ chipset = "Intel(R) Haswell Server";
+ break;
+ default:
+ chipset = "Unknown Intel Chipset";
+ break;
+ }
+
+ (void) driGetRendererString(buffer, chipset, 0);
+ return (GLubyte *) buffer;
+
+ default:
+ return NULL;
+ }
+}
+
+void
+intel_downsample_for_dri2_flush(struct intel_context *intel,
+ __DRIdrawable *drawable)
+{
+ if (intel->gen < 6) {
+ /* MSAA is not supported, so don't waste time checking for
+ * a multisample buffer.
+ */
+ return;
+ }
+
+ struct gl_framebuffer *fb = drawable->driverPrivate;
+ struct intel_renderbuffer *rb;
+
+ /* Usually, only the back buffer will need to be downsampled. However,
+ * the front buffer will also need it if the user has rendered into it.
+ */
+ static const gl_buffer_index buffers[2] = {
+ BUFFER_BACK_LEFT,
+ BUFFER_FRONT_LEFT,
+ };
+
+ for (int i = 0; i < 2; ++i) {
+ rb = intel_get_renderbuffer(fb, buffers[i]);
+ if (rb == NULL || rb->mt == NULL)
+ continue;
+ intel_miptree_downsample(intel, rb->mt);
+ }
+}
+
+static void
+intel_flush_front(struct gl_context *ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+ __DRIcontext *driContext = intel->driContext;
+ __DRIdrawable *driDrawable = driContext->driDrawablePriv;
+ __DRIscreen *const screen = intel->intelScreen->driScrnPriv;
+
+ if (_mesa_is_winsys_fbo(ctx->DrawBuffer) && intel->front_buffer_dirty) {
+ if (screen->dri2.loader->flushFrontBuffer != NULL &&
+ driDrawable &&
+ driDrawable->loaderPrivate) {
+
+ /* Downsample before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
+ *
+ * This potentially downsamples both the front and back buffers. The back
+ * buffer doesn't need it, but downsampling it anyway harms nothing except
+ * performance, and no one cares about front-buffer render performance.
+ */
+ intel_downsample_for_dri2_flush(intel, driDrawable);
+
+ screen->dri2.loader->flushFrontBuffer(driDrawable,
+ driDrawable->loaderPrivate);
+
+ /* We set the dirty bit in intel_prepare_render() if we're
+ * front buffer rendering once we get there.
+ */
+ intel->front_buffer_dirty = false;
+ }
+ }
+}
+
+static unsigned
+intel_bits_per_pixel(const struct intel_renderbuffer *rb)
+{
+ return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
+}
+
+static void
+intel_query_dri2_buffers(struct intel_context *intel,
+ __DRIdrawable *drawable,
+ __DRIbuffer **buffers,
+ int *count);
+
+static void
+intel_process_dri2_buffer(struct intel_context *intel,
+ __DRIdrawable *drawable,
+ __DRIbuffer *buffer,
+ struct intel_renderbuffer *rb,
+ const char *buffer_name);
+
+void
+intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
+{
+ struct gl_framebuffer *fb = drawable->driverPrivate;
+ struct intel_renderbuffer *rb;
+ struct intel_context *intel = context->driverPrivate;
+ __DRIbuffer *buffers = NULL;
+ int i, count;
+ const char *region_name;
+
+ /* If we're rendering to the fake front buffer, make sure all the
+ * pending drawing has landed on the real front buffer. Otherwise
+ * when we eventually get to DRI2GetBuffersWithFormat the stale
+ * real front buffer contents will get copied to the new fake front
+ * buffer.
+ */
+ if (intel->is_front_buffer_rendering) {
+ intel_flush(&intel->ctx);
+ intel_flush_front(&intel->ctx);
+ }
+
+ /* Set this up front, so that in case our buffers get invalidated
+ * while we're getting new buffers, we don't clobber the stamp and
+ * thus ignore the invalidate. */
+ drawable->lastStamp = drawable->dri2.stamp;
+
+ if (unlikely(INTEL_DEBUG & DEBUG_DRI))
+ fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
+
+ intel_query_dri2_buffers(intel, drawable, &buffers, &count);
+
+ if (buffers == NULL)
+ return;
+
+ for (i = 0; i < count; i++) {
+ switch (buffers[i].attachment) {
+ case __DRI_BUFFER_FRONT_LEFT:
+ rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
+ region_name = "dri2 front buffer";
+ break;
+
+ case __DRI_BUFFER_FAKE_FRONT_LEFT:
+ rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
+ region_name = "dri2 fake front buffer";
+ break;
+
+ case __DRI_BUFFER_BACK_LEFT:
+ rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
+ region_name = "dri2 back buffer";
+ break;
+
+ case __DRI_BUFFER_DEPTH:
+ case __DRI_BUFFER_HIZ:
+ case __DRI_BUFFER_DEPTH_STENCIL:
+ case __DRI_BUFFER_STENCIL:
+ case __DRI_BUFFER_ACCUM:
+ default:
+ fprintf(stderr,
+ "unhandled buffer attach event, attachment type %d\n",
+ buffers[i].attachment);
+ return;
+ }
+
+ intel_process_dri2_buffer(intel, drawable, &buffers[i], rb, region_name);
+ }
+
+ driUpdateFramebufferSize(&intel->ctx, drawable);
+}
+
+/**
+ * intel_prepare_render should be called anywhere that current read/drawbuffer
+ * state is required.
+ */
+void
+intel_prepare_render(struct intel_context *intel)
+{
+ __DRIcontext *driContext = intel->driContext;
+ __DRIdrawable *drawable;
+
+ drawable = driContext->driDrawablePriv;
+ if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
+ if (drawable->lastStamp != drawable->dri2.stamp)
+ intel_update_renderbuffers(driContext, drawable);
+ intel_draw_buffer(&intel->ctx);
+ driContext->dri2.draw_stamp = drawable->dri2.stamp;
+ }
+
+ drawable = driContext->driReadablePriv;
+ if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
+ if (drawable->lastStamp != drawable->dri2.stamp)
+ intel_update_renderbuffers(driContext, drawable);
+ driContext->dri2.read_stamp = drawable->dri2.stamp;
+ }
+
+ /* If we're currently rendering to the front buffer, the rendering
+ * that will happen next will probably dirty the front buffer. So
+ * mark it as dirty here.
+ */
+ if (intel->is_front_buffer_rendering)
+ intel->front_buffer_dirty = true;
+
+ /* Wait for the swapbuffers before the one we just emitted, so we
+ * don't get too many swaps outstanding for apps that are GPU-heavy
+ * but not CPU-heavy.
+ *
+ * We're using intelDRI2Flush (called from the loader before
+ * swapbuffer) and glFlush (for front buffer rendering) as the
+ * indicator that a frame is done and then throttle when we get
+ * here as we prepare to render the next frame. At this point the
+ * round trips for swap/copy and getting new buffers are done and
+ * we'll spend less time waiting on the GPU.
+ *
+ * Unfortunately, we don't have a handle to the batch containing
+ * the swap, and getting our hands on that doesn't seem worth it,
+ * so we just use the first batch we emitted after the last swap.
+ */
+ if (intel->need_throttle && intel->first_post_swapbuffers_batch) {
+ drm_intel_bo_wait_rendering(intel->first_post_swapbuffers_batch);
+ drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
+ intel->first_post_swapbuffers_batch = NULL;
+ intel->need_throttle = false;
+ }
+}
+
+static void
+intel_viewport(struct gl_context *ctx, GLint x, GLint y, GLsizei w, GLsizei h)
+{
+ struct intel_context *intel = intel_context(ctx);
+ __DRIcontext *driContext = intel->driContext;
+
+ if (intel->saved_viewport)
+ intel->saved_viewport(ctx, x, y, w, h);
+
+ if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
+ dri2InvalidateDrawable(driContext->driDrawablePriv);
+ dri2InvalidateDrawable(driContext->driReadablePriv);
+ }
+}
+
+static const struct dri_debug_control debug_control[] = {
+ { "tex", DEBUG_TEXTURE},
+ { "state", DEBUG_STATE},
+ { "ioctl", DEBUG_IOCTL},
+ { "blit", DEBUG_BLIT},
+ { "mip", DEBUG_MIPTREE},
+ { "fall", DEBUG_PERF},
+ { "perf", DEBUG_PERF},
+ { "verb", DEBUG_VERBOSE},
+ { "bat", DEBUG_BATCH},
+ { "pix", DEBUG_PIXEL},
+ { "buf", DEBUG_BUFMGR},
+ { "reg", DEBUG_REGION},
+ { "fbo", DEBUG_FBO},
+ { "gs", DEBUG_GS},
+ { "sync", DEBUG_SYNC},
+ { "prim", DEBUG_PRIMS },
+ { "vert", DEBUG_VERTS },
+ { "dri", DEBUG_DRI },
+ { "sf", DEBUG_SF },
+ { "san", DEBUG_SANITY },
+ { "sleep", DEBUG_SLEEP },
+ { "stats", DEBUG_STATS },
+ { "tile", DEBUG_TILE },
+ { "wm", DEBUG_WM },
+ { "urb", DEBUG_URB },
+ { "vs", DEBUG_VS },
+ { "clip", DEBUG_CLIP },
+ { "aub", DEBUG_AUB },
+ { NULL, 0 }
+};
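+
+/* These map onto the INTEL_DEBUG environment variable, parsed with
+ * driParseDebugString() in intelInitContext() below; e.g. (hypothetical
+ * shell usage):
+ *
+ *    INTEL_DEBUG=buf,bat glxgears
+ */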
+
+
+static void
+intelInvalidateState(struct gl_context * ctx, GLuint new_state)
+{
+ struct intel_context *intel = intel_context(ctx);
+
+ if (ctx->swrast_context)
+ _swrast_InvalidateState(ctx, new_state);
+ _vbo_InvalidateState(ctx, new_state);
+
+ intel->NewGLState |= new_state;
+
+ if (intel->vtbl.invalidate_state)
+ intel->vtbl.invalidate_state( intel, new_state );
+}
+
+void
+intel_flush_rendering_to_batch(struct gl_context *ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+
+ if (intel->Fallback)
+ _swrast_flush(ctx);
+
+ if (intel->gen < 4)
+ INTEL_FIREVERTICES(intel);
+}
+
+void
+_intel_flush(struct gl_context *ctx, const char *file, int line)
+{
+ struct intel_context *intel = intel_context(ctx);
+
+ intel_flush_rendering_to_batch(ctx);
+
+ if (intel->batch.used)
+ _intel_batchbuffer_flush(intel, file, line);
+}
+
+static void
+intel_glFlush(struct gl_context *ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+
+ intel_flush(ctx);
+ intel_flush_front(ctx);
+ if (intel->is_front_buffer_rendering)
+ intel->need_throttle = true;
+}
+
+void
+intelFinish(struct gl_context * ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+
+ intel_flush(ctx);
+ intel_flush_front(ctx);
+
+ if (intel->batch.last_bo)
+ drm_intel_bo_wait_rendering(intel->batch.last_bo);
+}
+
+void
+intelInitDriverFunctions(struct dd_function_table *functions)
+{
+ _mesa_init_driver_functions(functions);
+
+ functions->Flush = intel_glFlush;
+ functions->Finish = intelFinish;
+ functions->GetString = intelGetString;
+ functions->UpdateState = intelInvalidateState;
+
+ intelInitTextureFuncs(functions);
+ intelInitTextureImageFuncs(functions);
+ intelInitTextureSubImageFuncs(functions);
+ intelInitTextureCopyImageFuncs(functions);
+ intelInitClearFuncs(functions);
+ intelInitBufferFuncs(functions);
+ intelInitPixelFuncs(functions);
+ intelInitBufferObjectFuncs(functions);
+ intel_init_syncobj_functions(functions);
+}
+
+bool
+intelInitContext(struct intel_context *intel,
+ int api,
+ const struct gl_config * mesaVis,
+ __DRIcontext * driContextPriv,
+ void *sharedContextPrivate,
+ struct dd_function_table *functions)
+{
+ struct gl_context *ctx = &intel->ctx;
+ struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
+ __DRIscreen *sPriv = driContextPriv->driScreenPriv;
+ struct intel_screen *intelScreen = sPriv->driverPrivate;
+ int bo_reuse_mode;
+ struct gl_config visual;
+
+ /* we can't do anything without a connection to the device */
+ if (intelScreen->bufmgr == NULL)
+ return false;
+
+ /* Can't rely on invalidate events, fall back to glViewport hack */
+ if (!driContextPriv->driScreenPriv->dri2.useInvalidate) {
+ intel->saved_viewport = functions->Viewport;
+ functions->Viewport = intel_viewport;
+ }
+
+ if (mesaVis == NULL) {
+ memset(&visual, 0, sizeof visual);
+ mesaVis = &visual;
+ }
+
+ if (!_mesa_initialize_context(&intel->ctx, api, mesaVis, shareCtx,
+ functions, (void *) intel)) {
+ printf("%s: failed to init mesa context\n", __FUNCTION__);
+ return false;
+ }
+
+ driContextPriv->driverPrivate = intel;
+ intel->intelScreen = intelScreen;
+ intel->driContext = driContextPriv;
+ intel->driFd = sPriv->fd;
+
+ intel->gen = intelScreen->gen;
+
+ const int devID = intelScreen->deviceID;
+ if (IS_SNB_GT1(devID) || IS_IVB_GT1(devID) || IS_HSW_GT1(devID))
+ intel->gt = 1;
+ else if (IS_SNB_GT2(devID) || IS_IVB_GT2(devID) || IS_HSW_GT2(devID))
+ intel->gt = 2;
+ else
+ intel->gt = 0;
+
+ if (IS_HASWELL(devID)) {
+ intel->is_haswell = true;
+ } else if (IS_G4X(devID)) {
+ intel->is_g4x = true;
+ } else if (IS_945(devID)) {
+ intel->is_945 = true;
+ }
+
+ if (intel->gen >= 5) {
+ intel->needs_ff_sync = true;
+ }
+
+ intel->has_separate_stencil = intel->intelScreen->hw_has_separate_stencil;
+ intel->must_use_separate_stencil = intel->intelScreen->hw_must_use_separate_stencil;
+ intel->has_hiz = intel->gen >= 6 && !intel->is_haswell;
+ intel->has_llc = intel->intelScreen->hw_has_llc;
+ intel->has_swizzling = intel->intelScreen->hw_has_swizzling;
+
+ memset(&ctx->TextureFormatSupported,
+ 0, sizeof(ctx->TextureFormatSupported));
+
+ driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
+ sPriv->myNum, (intel->gen >= 4) ? "i965" : "i915");
+ if (intel->gen < 4)
+ intel->maxBatchSize = 4096;
+ else
+ intel->maxBatchSize = sizeof(intel->batch.map);
+
+ intel->bufmgr = intelScreen->bufmgr;
+
+ bo_reuse_mode = driQueryOptioni(&intel->optionCache, "bo_reuse");
+ switch (bo_reuse_mode) {
+ case DRI_CONF_BO_REUSE_DISABLED:
+ break;
+ case DRI_CONF_BO_REUSE_ALL:
+ intel_bufmgr_gem_enable_reuse(intel->bufmgr);
+ break;
+ }
+
+ ctx->Const.MinLineWidth = 1.0;
+ ctx->Const.MinLineWidthAA = 1.0;
+ ctx->Const.MaxLineWidth = 5.0;
+ ctx->Const.MaxLineWidthAA = 5.0;
+ ctx->Const.LineWidthGranularity = 0.5;
+
+ ctx->Const.MinPointSize = 1.0;
+ ctx->Const.MinPointSizeAA = 1.0;
+ ctx->Const.MaxPointSize = 255.0;
+ ctx->Const.MaxPointSizeAA = 3.0;
+ ctx->Const.PointSizeGranularity = 1.0;
+
+ ctx->Const.MaxSamples = 1;
+
+ if (intel->gen >= 6)
+ ctx->Const.MaxClipPlanes = 8;
+
+ ctx->Const.StripTextureBorder = GL_TRUE;
+
+ /* Reinitialize the context point state. It depends on constants in
+ * struct gl_contextRec::Const.
+ */
+ _mesa_init_point(ctx);
+
+ if (intel->gen >= 4) {
+ ctx->Const.MaxRenderbufferSize = 8192;
+ } else {
+ ctx->Const.MaxRenderbufferSize = 2048;
+ }
+
+ /* Initialize the software rasterizer and helper modules.
+ *
+ * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
+ * software fallbacks (which we have to support on legacy GL to do weird
+ * glDrawPixels(), glBitmap(), and other functions).
+ */
+ if (intel->gen <= 3 || api != API_OPENGL_CORE) {
+ _swrast_CreateContext(ctx);
+ }
+
+ _vbo_CreateContext(ctx);
+ if (ctx->swrast_context) {
+ _tnl_CreateContext(ctx);
+ _swsetup_CreateContext(ctx);
+
+ /* Configure swrast to match hardware characteristics: */
+ _swrast_allow_pixel_fog(ctx, false);
+ _swrast_allow_vertex_fog(ctx, true);
+ }
+
+ _mesa_meta_init(ctx);
+
+ intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
+ intel->hw_stipple = 1;
+
+ /* XXX FBO: this doesn't seem to be used anywhere */
+ switch (mesaVis->depthBits) {
+ case 0: /* what to do in this case? */
+ case 16:
+ intel->polygon_offset_scale = 1.0;
+ break;
+ case 24:
+ intel->polygon_offset_scale = 2.0; /* req'd to pass glean */
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ if (intel->gen >= 4)
+ intel->polygon_offset_scale /= 0xffff;
+
+ intel->RenderIndex = ~0;
+
+ intelInitExtensions(ctx);
+
+ INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
+ if (INTEL_DEBUG & DEBUG_BUFMGR)
+ dri_bufmgr_set_debug(intel->bufmgr, true);
+
+ if (INTEL_DEBUG & DEBUG_AUB)
+ drm_intel_bufmgr_gem_set_aub_dump(intel->bufmgr, true);
+
+ intel_batchbuffer_init(intel);
+
+ intel_fbo_init(intel);
+
+ intel->use_texture_tiling = driQueryOptionb(&intel->optionCache,
+ "texture_tiling");
+ intel->use_early_z = driQueryOptionb(&intel->optionCache, "early_z");
+
+ if (!driQueryOptionb(&intel->optionCache, "hiz")) {
+ intel->has_hiz = false;
+ /* On gen6, you can only do separate stencil with HIZ. */
+ if (intel->gen == 6)
+ intel->has_separate_stencil = false;
+ }
+
+ intel->prim.primitive = ~0;
+
+ /* Force all software fallbacks */
+#ifdef I915
+ if (driQueryOptionb(&intel->optionCache, "no_rast")) {
+ fprintf(stderr, "disabling 3D rasterization\n");
+ intel->no_rast = 1;
+ }
+#endif
+
+ if (driQueryOptionb(&intel->optionCache, "always_flush_batch")) {
+ fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
+ intel->always_flush_batch = 1;
+ }
+
+ if (driQueryOptionb(&intel->optionCache, "always_flush_cache")) {
+ fprintf(stderr, "flushing GPU caches before/after each draw call\n");
+ intel->always_flush_cache = 1;
+ }
+
+ return true;
+}
+
+void
+intelDestroyContext(__DRIcontext * driContextPriv)
+{
+ struct intel_context *intel =
+ (struct intel_context *) driContextPriv->driverPrivate;
+ struct gl_context *ctx = &intel->ctx;
+
+ assert(intel); /* should never be null */
+ if (intel) {
+ INTEL_FIREVERTICES(intel);
+
+ /* Dump a final BMP in case the application doesn't call SwapBuffers */
+ if (INTEL_DEBUG & DEBUG_AUB) {
+ intel_batchbuffer_flush(intel);
+ aub_dump_bmp(&intel->ctx);
+ }
+
+ _mesa_meta_free(&intel->ctx);
+
+ intel->vtbl.destroy(intel);
+
+ if (ctx->swrast_context) {
+ _swsetup_DestroyContext(&intel->ctx);
+ _tnl_DestroyContext(&intel->ctx);
+ }
+ _vbo_DestroyContext(&intel->ctx);
+
+ if (ctx->swrast_context)
+ _swrast_DestroyContext(&intel->ctx);
+ intel->Fallback = 0x0; /* don't call _swrast_Flush later */
+
+ intel_batchbuffer_free(intel);
+
+ free(intel->prim.vb);
+ intel->prim.vb = NULL;
+ drm_intel_bo_unreference(intel->prim.vb_bo);
+ intel->prim.vb_bo = NULL;
+ drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
+ intel->first_post_swapbuffers_batch = NULL;
+
+ driDestroyOptionCache(&intel->optionCache);
+
+ /* free the Mesa context */
+ _mesa_free_context_data(&intel->ctx);
+
+ _math_matrix_dtr(&intel->ViewportMatrix);
+
+ ralloc_free(intel);
+ driContextPriv->driverPrivate = NULL;
+ }
+}
+
+GLboolean
+intelUnbindContext(__DRIcontext * driContextPriv)
+{
+ /* Unset current context and dispatch table */
+ _mesa_make_current(NULL, NULL, NULL);
+
+ return true;
+}
+
+GLboolean
+intelMakeCurrent(__DRIcontext * driContextPriv,
+ __DRIdrawable * driDrawPriv,
+ __DRIdrawable * driReadPriv)
+{
+ struct intel_context *intel;
+ GET_CURRENT_CONTEXT(curCtx);
+
+ if (driContextPriv)
+ intel = (struct intel_context *) driContextPriv->driverPrivate;
+ else
+ intel = NULL;
+
+ /* According to the glXMakeCurrent() man page: "Pending commands to
+ * the previous context, if any, are flushed before it is released."
+ * But only flush if we're actually changing contexts.
+ */
+ if (intel_context(curCtx) && intel_context(curCtx) != intel) {
+ _mesa_flush(curCtx);
+ }
+
+ if (driContextPriv) {
+ struct gl_framebuffer *fb, *readFb;
+
+ if (driDrawPriv == NULL && driReadPriv == NULL) {
+ fb = _mesa_get_incomplete_framebuffer();
+ readFb = _mesa_get_incomplete_framebuffer();
+ } else {
+ fb = driDrawPriv->driverPrivate;
+ readFb = driReadPriv->driverPrivate;
+ driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
+ driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
+ }
+
+ intel_prepare_render(intel);
+ _mesa_make_current(&intel->ctx, fb, readFb);
+
+ /* We do this in intel_prepare_render() too, but intel->ctx.DrawBuffer
+ * is NULL at that point. We can't call _mesa_make_current()
+ * first, since we need the buffer size for the initial
+ * viewport. So just call intel_draw_buffer() again here. */
+ intel_draw_buffer(&intel->ctx);
+ }
+ else {
+ _mesa_make_current(NULL, NULL, NULL);
+ }
+
+ return true;
+}
+
+/**
+ * \brief Query DRI2 to obtain a DRIdrawable's buffers.
+ *
+ * To determine which DRI buffers to request, examine the renderbuffers
+ * attached to the drawable's framebuffer. Then request the buffers with
+ * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
+ *
+ * This is called from intel_update_renderbuffers().
+ *
+ * \param drawable Drawable whose buffers are queried.
+ * \param buffers [out] List of buffers returned by DRI2 query.
+ * \param buffer_count [out] Number of buffers returned.
+ *
+ * \see intel_update_renderbuffers()
+ * \see DRI2GetBuffers()
+ * \see DRI2GetBuffersWithFormat()
+ */
+static void
+intel_query_dri2_buffers(struct intel_context *intel,
+ __DRIdrawable *drawable,
+ __DRIbuffer **buffers,
+ int *buffer_count)
+{
+ __DRIscreen *screen = intel->intelScreen->driScrnPriv;
+ struct gl_framebuffer *fb = drawable->driverPrivate;
+ int i = 0;
+ const int max_attachments = 4;
+ unsigned *attachments = calloc(2 * max_attachments, sizeof(unsigned));
+
+ struct intel_renderbuffer *front_rb;
+ struct intel_renderbuffer *back_rb;
+
+ front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
+ back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
+
+ if ((intel->is_front_buffer_rendering ||
+ intel->is_front_buffer_reading ||
+ !back_rb) && front_rb) {
+ attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
+ attachments[i++] = intel_bits_per_pixel(front_rb);
+ }
+
+ if (back_rb) {
+ attachments[i++] = __DRI_BUFFER_BACK_LEFT;
+ attachments[i++] = intel_bits_per_pixel(back_rb);
+ }
+
+ assert(i <= 2 * max_attachments);
+
+ *buffers = screen->dri2.loader->getBuffersWithFormat(drawable,
+ &drawable->w,
+ &drawable->h,
+ attachments, i / 2,
+ buffer_count,
+ drawable->loaderPrivate);
+ free(attachments);
+}
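+
+/* For instance (values assumed for illustration), a double-buffered drawable
+ * with 32 bpp color buffers and active front-buffer reading would yield:
+ *
+ *    attachments = { __DRI_BUFFER_FRONT_LEFT, 32,
+ *                    __DRI_BUFFER_BACK_LEFT,  32 };
+ *
+ * i.e. i / 2 == 2 (attachment, bpp) pairs passed to getBuffersWithFormat().
+ */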
+
+/**
+ * \brief Assign a DRI buffer's DRM region to a renderbuffer.
+ *
+ * This is called from intel_update_renderbuffers().
+ *
+ * \par Note:
+ * DRI buffers whose attachment point is DRI2BufferStencil or
+ * DRI2BufferDepthStencil are handled as special cases.
+ *
+ * \param buffer_name is a human readable name, such as "dri2 front buffer",
+ * that is passed to intel_region_alloc_for_handle().
+ *
+ * \see intel_update_renderbuffers()
+ * \see intel_region_alloc_for_handle()
+ */
+static void
+intel_process_dri2_buffer(struct intel_context *intel,
+ __DRIdrawable *drawable,
+ __DRIbuffer *buffer,
+ struct intel_renderbuffer *rb,
+ const char *buffer_name)
+{
+ struct intel_region *region = NULL;
+
+ if (!rb)
+ return;
+
+ unsigned num_samples = rb->Base.Base.NumSamples;
+
+ /* We try to avoid closing and reopening the same BO name, because the first
+ * use of a mapping of the buffer involves a bunch of page faulting which is
+ * moderately expensive.
+ */
+ if (num_samples == 0) {
+ if (rb->mt &&
+ rb->mt->region &&
+ rb->mt->region->name == buffer->name)
+ return;
+ } else {
+ if (rb->mt &&
+ rb->mt->singlesample_mt &&
+ rb->mt->singlesample_mt->region &&
+ rb->mt->singlesample_mt->region->name == buffer->name)
+ return;
+ }
+
+ if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
+ fprintf(stderr,
+ "attaching buffer %d, at %d, cpp %d, pitch %d\n",
+ buffer->name, buffer->attachment,
+ buffer->cpp, buffer->pitch);
+ }
+
+ intel_miptree_release(&rb->mt);
+ region = intel_region_alloc_for_handle(intel->intelScreen,
+ buffer->cpp,
+ drawable->w,
+ drawable->h,
+ buffer->pitch / buffer->cpp,
+ buffer->name,
+ buffer_name);
+ if (!region)
+ return;
+
+ rb->mt = intel_miptree_create_for_dri2_buffer(intel,
+ buffer->attachment,
+ intel_rb_format(rb),
+ num_samples,
+ region);
+ intel_region_release(&region);
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_extensions.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_extensions.c
index a2f3e8cd208..885e8a43e80 120000..100755
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_extensions.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_extensions.c
@@ -1 +1,197 @@
-../intel/intel_extensions.c
\ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/mfeatures.h"
+#include "main/version.h"
+
+#include "intel_chipset.h"
+#include "intel_context.h"
+#include "intel_extensions.h"
+#include "intel_reg.h"
+#include "utils.h"
+
+/**
+ * Enables the extensions this driver supports for the given context,
+ * based on hardware generation and driconf options.
+ */
+void
+intelInitExtensions(struct gl_context *ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+
+ ctx->Extensions.ARB_draw_elements_base_vertex = true;
+ ctx->Extensions.ARB_explicit_attrib_location = true;
+ if (_mesa_is_desktop_gl(ctx))
+ ctx->Extensions.ARB_framebuffer_object = true;
+ ctx->Extensions.ARB_half_float_pixel = true;
+ ctx->Extensions.ARB_map_buffer_range = true;
+ ctx->Extensions.ARB_point_sprite = true;
+ ctx->Extensions.ARB_shader_objects = true;
+ ctx->Extensions.ARB_shading_language_100 = true;
+ ctx->Extensions.ARB_sync = true;
+ ctx->Extensions.ARB_texture_border_clamp = true;
+ ctx->Extensions.ARB_texture_cube_map = true;
+ ctx->Extensions.ARB_texture_env_combine = true;
+ ctx->Extensions.ARB_texture_env_crossbar = true;
+ ctx->Extensions.ARB_texture_env_dot3 = true;
+ ctx->Extensions.ARB_texture_storage = true;
+ ctx->Extensions.ARB_vertex_program = true;
+ ctx->Extensions.ARB_vertex_shader = true;
+ ctx->Extensions.EXT_blend_color = true;
+ ctx->Extensions.EXT_blend_equation_separate = true;
+ ctx->Extensions.EXT_blend_func_separate = true;
+ ctx->Extensions.EXT_blend_minmax = true;
+ ctx->Extensions.EXT_framebuffer_blit = true;
+ ctx->Extensions.EXT_framebuffer_object = true;
+ ctx->Extensions.EXT_framebuffer_multisample = true;
+ ctx->Extensions.EXT_fog_coord = true;
+ ctx->Extensions.EXT_gpu_program_parameters = true;
+ ctx->Extensions.EXT_packed_depth_stencil = true;
+ ctx->Extensions.EXT_pixel_buffer_object = true;
+ ctx->Extensions.EXT_point_parameters = true;
+ ctx->Extensions.EXT_provoking_vertex = true;
+ ctx->Extensions.EXT_secondary_color = true;
+ ctx->Extensions.EXT_separate_shader_objects = true;
+ ctx->Extensions.EXT_texture_env_dot3 = true;
+ ctx->Extensions.EXT_texture_filter_anisotropic = true;
+ ctx->Extensions.APPLE_object_purgeable = true;
+ ctx->Extensions.MESA_pack_invert = true;
+ ctx->Extensions.MESA_ycbcr_texture = true;
+ ctx->Extensions.NV_blend_square = true;
+ ctx->Extensions.NV_texture_rectangle = true;
+ ctx->Extensions.NV_vertex_program = true;
+ ctx->Extensions.NV_vertex_program1_1 = true;
+ ctx->Extensions.TDFX_texture_compression_FXT1 = true;
+#if FEATURE_OES_EGL_image
+ ctx->Extensions.OES_EGL_image = true;
+#endif
+ ctx->Extensions.OES_draw_texture = true;
+ ctx->Extensions.OES_compressed_ETC1_RGB8_texture = true;
+ ctx->Extensions.ARB_texture_rgb10_a2ui = true;
+
+ if (intel->gen >= 6) {
+ if (ctx->API == API_OPENGL_CORE) {
+ ctx->Const.GLSLVersion = 140;
+ } else {
+ ctx->Const.GLSLVersion = 130;
+ }
+ } else {
+ ctx->Const.GLSLVersion = 120;
+ }
+ _mesa_override_glsl_version(ctx);
+
+ if (intel->gen == 6 ||
+ (intel->gen == 7 && intel->intelScreen->kernel_has_gen7_sol_reset))
+ ctx->Extensions.EXT_transform_feedback = true;
+
+ if (intel->gen >= 6) {
+ ctx->Extensions.ARB_blend_func_extended = !driQueryOptionb(&intel->optionCache, "disable_blend_func_extended");
+ ctx->Extensions.ARB_draw_buffers_blend = true;
+ ctx->Extensions.ARB_uniform_buffer_object = true;
+
+ if (ctx->API == API_OPENGL_CORE) {
+ ctx->Extensions.ARB_texture_buffer_object = true;
+ }
+ }
+
+ if (intel->gen >= 5)
+ ctx->Extensions.EXT_timer_query = true;
+
+ if (intel->gen >= 6) {
+ uint64_t dummy;
+ /* Test if the kernel has the ioctl. */
+ if (drm_intel_reg_read(intel->bufmgr, TIMESTAMP, &dummy) == 0)
+ ctx->Extensions.ARB_timer_query = true;
+ }
+
+ if (intel->gen >= 4) {
+ ctx->Extensions.ARB_color_buffer_float = true;
+ ctx->Extensions.ARB_depth_buffer_float = true;
+ ctx->Extensions.ARB_depth_clamp = true;
+ ctx->Extensions.ARB_draw_instanced = true;
+ ctx->Extensions.ARB_instanced_arrays = true;
+ ctx->Extensions.ARB_fragment_coord_conventions = true;
+ ctx->Extensions.ARB_fragment_program_shadow = true;
+ ctx->Extensions.ARB_fragment_shader = true;
+ ctx->Extensions.ARB_half_float_vertex = true;
+ ctx->Extensions.ARB_occlusion_query = true;
+ ctx->Extensions.ARB_occlusion_query2 = true;
+ ctx->Extensions.ARB_point_sprite = true;
+ ctx->Extensions.ARB_seamless_cube_map = true;
+ ctx->Extensions.ARB_shader_bit_encoding = true;
+ ctx->Extensions.ARB_shader_texture_lod = true;
+#ifdef TEXTURE_FLOAT_ENABLED
+ ctx->Extensions.ARB_texture_float = true;
+ ctx->Extensions.EXT_texture_shared_exponent = true;
+ ctx->Extensions.EXT_packed_float = true;
+#endif
+ ctx->Extensions.ARB_texture_compression_rgtc = true;
+ ctx->Extensions.ARB_texture_rg = true;
+ ctx->Extensions.EXT_draw_buffers2 = true;
+ ctx->Extensions.EXT_framebuffer_sRGB = true;
+ ctx->Extensions.EXT_texture_array = true;
+ ctx->Extensions.EXT_texture_integer = true;
+ ctx->Extensions.EXT_texture_snorm = true;
+ ctx->Extensions.EXT_texture_sRGB = true;
+ ctx->Extensions.EXT_texture_sRGB_decode = true;
+ ctx->Extensions.EXT_texture_swizzle = true;
+ ctx->Extensions.EXT_vertex_array_bgra = true;
+ ctx->Extensions.ATI_envmap_bumpmap = true;
+ ctx->Extensions.MESA_texture_array = true;
+ ctx->Extensions.NV_conditional_render = true;
+ }
+
+ if (intel->gen >= 3) {
+ ctx->Extensions.ARB_ES2_compatibility = true;
+ ctx->Extensions.ARB_depth_texture = true;
+ ctx->Extensions.ARB_fragment_program = true;
+ ctx->Extensions.ARB_shadow = true;
+ ctx->Extensions.ARB_texture_non_power_of_two = true;
+ ctx->Extensions.EXT_shadow_funcs = true;
+ ctx->Extensions.EXT_stencil_two_side = true;
+ ctx->Extensions.ATI_separate_stencil = true;
+ ctx->Extensions.ATI_texture_env_combine3 = true;
+ ctx->Extensions.NV_texture_env_combine4 = true;
+
+ if (driQueryOptionb(&intel->optionCache, "fragment_shader"))
+ ctx->Extensions.ARB_fragment_shader = true;
+
+ if (driQueryOptionb(&intel->optionCache, "stub_occlusion_query"))
+ ctx->Extensions.ARB_occlusion_query = true;
+ }
+
+ if (intel->ctx.Mesa_DXTn) {
+ ctx->Extensions.EXT_texture_compression_s3tc = true;
+ ctx->Extensions.S3_s3tc = true;
+ }
+ else if (driQueryOptionb(&intel->optionCache, "force_s3tc_enable")) {
+ ctx->Extensions.EXT_texture_compression_s3tc = true;
+ }
+
+ if (intel->gen >= 4) {
+ ctx->Extensions.NV_primitive_restart = true;
+ }
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_fbo.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_fbo.c
index a19f86dcc57..65494a2d506 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_fbo.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_fbo.c
@@ -1 +1,964 @@
-../intel/intel_fbo.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "main/enums.h"
+#include "main/imports.h"
+#include "main/macros.h"
+#include "main/mfeatures.h"
+#include "main/mtypes.h"
+#include "main/fbobject.h"
+#include "main/framebuffer.h"
+#include "main/renderbuffer.h"
+#include "main/context.h"
+#include "main/teximage.h"
+#include "main/image.h"
+
+#include "swrast/swrast.h"
+#include "drivers/common/meta.h"
+
+#include "intel_context.h"
+#include "intel_batchbuffer.h"
+#include "intel_buffers.h"
+#include "intel_blit.h"
+#include "intel_fbo.h"
+#include "intel_mipmap_tree.h"
+#include "intel_regions.h"
+#include "intel_tex.h"
+#include "intel_span.h"
+#ifndef I915
+#include "brw_context.h"
+#endif
+
+#define FILE_DEBUG_FLAG DEBUG_FBO
+
+static struct gl_renderbuffer *
+intel_new_renderbuffer(struct gl_context * ctx, GLuint name);
+
+struct intel_region*
+intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
+{
+ struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex);
+ if (irb && irb->mt) {
+ if (attIndex == BUFFER_STENCIL && irb->mt->stencil_mt)
+ return irb->mt->stencil_mt->region;
+ else
+ return irb->mt->region;
+ } else
+ return NULL;
+}
+
+/**
+ * Create a new framebuffer object.
+ */
+static struct gl_framebuffer *
+intel_new_framebuffer(struct gl_context * ctx, GLuint name)
+{
+   /* There is only drawable state in intel_framebuffer at this time, so just
+    * use Mesa's class.
+    */
+ return _mesa_new_framebuffer(ctx, name);
+}
+
+
+/** Called by gl_renderbuffer::Delete() */
+static void
+intel_delete_renderbuffer(struct gl_context *ctx, struct gl_renderbuffer *rb)
+{
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+
+ ASSERT(irb);
+
+ intel_miptree_release(&irb->mt);
+
+ _mesa_delete_renderbuffer(ctx, rb);
+}
+
+/**
+ * \see dd_function_table::MapRenderbuffer
+ */
+static void
+intel_map_renderbuffer(struct gl_context *ctx,
+ struct gl_renderbuffer *rb,
+ GLuint x, GLuint y, GLuint w, GLuint h,
+ GLbitfield mode,
+ GLubyte **out_map,
+ GLint *out_stride)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+ void *map;
+ int stride;
+
+ if (srb->Buffer) {
+ /* this is a malloc'd renderbuffer (accum buffer), not an irb */
+ GLint bpp = _mesa_get_format_bytes(rb->Format);
+ GLint rowStride = srb->RowStride;
+ *out_map = (GLubyte *) srb->Buffer + y * rowStride + x * bpp;
+ *out_stride = rowStride;
+ return;
+ }
+
+   /* intel_span.c sometimes calls us with a renderbuffer that has no miptree. */
+ if (!irb->mt) {
+ *out_map = NULL;
+ *out_stride = 0;
+ return;
+ }
+
+ /* For a window-system renderbuffer, we need to flip the mapping we receive
+    * upside-down. So we need to ask for a rectangle flipped vertically, and
+ * we then return a pointer to the bottom of it with a negative stride.
+ */
+ if (rb->Name == 0) {
+ y = rb->Height - y - h;
+ }
+
+ intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer,
+ x, y, w, h, mode, &map, &stride);
+
+ if (rb->Name == 0) {
+ map += (h - 1) * stride;
+ stride = -stride;
+ }
+
+ DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
+ __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
+ x, y, w, h, map, stride);
+
+ *out_map = map;
+ *out_stride = stride;
+}
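+
+/* Illustrative usage sketch (not part of the driver): reading back mapped
+ * rows. Because window-system buffers are returned bottom-up with a negative
+ * stride, the same row loop works for both winsys and user renderbuffers:
+ *
+ *    GLubyte *map;
+ *    GLint stride;
+ *    intel_map_renderbuffer(ctx, rb, 0, 0, w, h, GL_MAP_READ_BIT,
+ *                           &map, &stride);
+ *    for (GLuint row = 0; row < h; row++)
+ *       process_row(map + (GLint)row * stride);  // stride < 0 for winsys rb
+ *    intel_unmap_renderbuffer(ctx, rb);
+ *
+ * Here process_row() is a hypothetical consumer.
+ */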
+
+/**
+ * \see dd_function_table::UnmapRenderbuffer
+ */
+static void
+intel_unmap_renderbuffer(struct gl_context *ctx,
+ struct gl_renderbuffer *rb)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+
+ DBG("%s: rb %d (%s)\n", __FUNCTION__,
+ rb->Name, _mesa_get_format_name(rb->Format));
+
+ if (srb->Buffer) {
+ /* this is a malloc'd renderbuffer (accum buffer) */
+ /* nothing to do */
+ return;
+ }
+
+ intel_miptree_unmap(intel, irb->mt, irb->mt_level, irb->mt_layer);
+}
+
+
+/**
+ * Round up the requested multisample count to the next supported sample size.
+ */
+unsigned
+intel_quantize_num_samples(struct intel_screen *intel, unsigned num_samples)
+{
+ switch (intel->gen) {
+ case 6:
+ /* Gen6 supports only 4x multisampling. */
+ if (num_samples > 0)
+ return 4;
+ else
+ return 0;
+ case 7:
+ /* Gen7 supports 4x and 8x multisampling. */
+ if (num_samples > 4)
+ return 8;
+ else if (num_samples > 0)
+ return 4;
+ else
+ return 0;
+ default:
+ /* MSAA unsupported. However, a careful reading of
+ * EXT_framebuffer_multisample reveals that we need to permit
+ * num_samples to be 1 (since num_samples is permitted to be as high as
+ * GL_MAX_SAMPLES, and GL_MAX_SAMPLES must be at least 1). Since
+ * platforms before Gen6 don't support MSAA, this is safe, because
+ * multisampling won't happen anyhow.
+ */
+ if (num_samples > 0)
+ return 1;
+ return 0;
+ }
+}
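+
+/* Illustrative results of the rules above (assuming a Gen7 screen):
+ *
+ *    intel_quantize_num_samples(screen, 0);  // -> 0 (no MSAA)
+ *    intel_quantize_num_samples(screen, 2);  // -> 4 (rounded up)
+ *    intel_quantize_num_samples(screen, 6);  // -> 8 (rounded up)
+ *
+ * On Gen6 every nonzero request maps to 4, and before Gen6 to 1.
+ */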
+
+
+/**
+ * Called via glRenderbufferStorageEXT() to set the format and allocate
+ * storage for a user-created renderbuffer.
+ */
+GLboolean
+intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
+ GLenum internalFormat,
+ GLuint width, GLuint height)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_screen *screen = intel->intelScreen;
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+ rb->NumSamples = intel_quantize_num_samples(screen, rb->NumSamples);
+
+ switch (internalFormat) {
+ default:
+ /* Use the same format-choice logic as for textures.
+ * Renderbuffers aren't any different from textures for us,
+ * except they're less useful because you can't texture with
+ * them.
+ */
+ rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, GL_TEXTURE_2D,
+ internalFormat,
+ GL_NONE, GL_NONE);
+ break;
+ case GL_STENCIL_INDEX:
+ case GL_STENCIL_INDEX1_EXT:
+ case GL_STENCIL_INDEX4_EXT:
+ case GL_STENCIL_INDEX8_EXT:
+ case GL_STENCIL_INDEX16_EXT:
+ /* These aren't actual texture formats, so force them here. */
+ if (intel->has_separate_stencil) {
+ rb->Format = MESA_FORMAT_S8;
+ } else {
+ assert(!intel->must_use_separate_stencil);
+ rb->Format = MESA_FORMAT_S8_Z24;
+ }
+ break;
+ }
+
+ rb->Width = width;
+ rb->Height = height;
+ rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);
+
+ intel_miptree_release(&irb->mt);
+
+ DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr(internalFormat),
+ _mesa_get_format_name(rb->Format), width, height);
+
+ if (width == 0 || height == 0)
+ return true;
+
+ irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format,
+ width, height,
+ rb->NumSamples);
+ if (!irb->mt)
+ return false;
+
+ return true;
+}
+
+
+#if FEATURE_OES_EGL_image
+static void
+intel_image_target_renderbuffer_storage(struct gl_context *ctx,
+ struct gl_renderbuffer *rb,
+ void *image_handle)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_renderbuffer *irb;
+ __DRIscreen *screen;
+ __DRIimage *image;
+
+ screen = intel->intelScreen->driScrnPriv;
+ image = screen->dri2.image->lookupEGLImage(screen, image_handle,
+ screen->loaderPrivate);
+ if (image == NULL)
+ return;
+
+ /* __DRIimage is opaque to the core so it has to be checked here */
+ switch (image->format) {
+ case MESA_FORMAT_RGBA8888_REV:
+ _mesa_error(&intel->ctx, GL_INVALID_OPERATION,
+ "glEGLImageTargetRenderbufferStorage(unsupported image format");
+ return;
+ break;
+ default:
+ break;
+ }
+
+ irb = intel_renderbuffer(rb);
+ intel_miptree_release(&irb->mt);
+ irb->mt = intel_miptree_create_for_region(intel,
+ GL_TEXTURE_2D,
+ image->format,
+ image->region);
+ if (!irb->mt)
+ return;
+
+ rb->InternalFormat = image->internal_format;
+ rb->Width = image->region->width;
+ rb->Height = image->region->height;
+ rb->Format = image->format;
+ rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
+ image->internal_format);
+}
+#endif
+
+/**
+ * Called for each hardware renderbuffer when a _window_ is resized.
+ * Just update fields.
+ * Not used for user-created renderbuffers!
+ */
+static GLboolean
+intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
+ GLenum internalFormat, GLuint width, GLuint height)
+{
+ ASSERT(rb->Name == 0);
+ rb->Width = width;
+ rb->Height = height;
+ rb->InternalFormat = internalFormat;
+
+ return true;
+}
+
+
+static void
+intel_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb,
+ GLuint width, GLuint height)
+{
+ int i;
+
+ _mesa_resize_framebuffer(ctx, fb, width, height);
+
+ fb->Initialized = true; /* XXX remove someday */
+
+ if (_mesa_is_user_fbo(fb)) {
+ return;
+ }
+
+ /* Make sure all window system renderbuffers are up to date */
+ for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) {
+ struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
+
+ /* only resize if size is changing */
+ if (rb && (rb->Width != width || rb->Height != height)) {
+ rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height);
+ }
+ }
+}
+
+
+/** Dummy function for gl_renderbuffer::AllocStorage() */
+static GLboolean
+intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
+ GLenum internalFormat, GLuint width, GLuint height)
+{
+   _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
+ return false;
+}
+
+/**
+ * Create a new intel_renderbuffer which corresponds to an on-screen window,
+ * not a user-created renderbuffer.
+ *
+ * \param num_samples must be quantized.
+ */
+struct intel_renderbuffer *
+intel_create_renderbuffer(gl_format format, unsigned num_samples)
+{
+ struct intel_renderbuffer *irb;
+ struct gl_renderbuffer *rb;
+
+ GET_CURRENT_CONTEXT(ctx);
+
+ irb = CALLOC_STRUCT(intel_renderbuffer);
+ if (!irb) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
+ return NULL;
+ }
+
+ rb = &irb->Base.Base;
+
+ _mesa_init_renderbuffer(rb, 0);
+ rb->ClassID = INTEL_RB_CLASS;
+ rb->_BaseFormat = _mesa_get_format_base_format(format);
+ rb->Format = format;
+ rb->InternalFormat = rb->_BaseFormat;
+ rb->NumSamples = num_samples;
+
+ /* intel-specific methods */
+ rb->Delete = intel_delete_renderbuffer;
+ rb->AllocStorage = intel_alloc_window_storage;
+
+ return irb;
+}
+
+/**
+ * Private window-system buffers (as opposed to ones shared with the display
+ * server created with intel_create_renderbuffer()) are most similar in their
+ * handling to user-created renderbuffers, but they have a resize handler that
+ * may be called at intel_update_renderbuffers() time.
+ *
+ * \param num_samples must be quantized.
+ */
+struct intel_renderbuffer *
+intel_create_private_renderbuffer(gl_format format, unsigned num_samples)
+{
+ struct intel_renderbuffer *irb;
+
+   irb = intel_create_renderbuffer(format, num_samples);
+   if (!irb)
+      return NULL;
+
+   irb->Base.Base.AllocStorage = intel_alloc_renderbuffer_storage;
+
+ return irb;
+}
+
+/**
+ * Create a new renderbuffer object.
+ * Typically called via glBindRenderbufferEXT().
+ */
+static struct gl_renderbuffer *
+intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
+{
+ /*struct intel_context *intel = intel_context(ctx); */
+ struct intel_renderbuffer *irb;
+ struct gl_renderbuffer *rb;
+
+ irb = CALLOC_STRUCT(intel_renderbuffer);
+ if (!irb) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
+ return NULL;
+ }
+
+ rb = &irb->Base.Base;
+
+ _mesa_init_renderbuffer(rb, name);
+ rb->ClassID = INTEL_RB_CLASS;
+
+ /* intel-specific methods */
+ rb->Delete = intel_delete_renderbuffer;
+ rb->AllocStorage = intel_alloc_renderbuffer_storage;
+ /* span routines set in alloc_storage function */
+
+ return rb;
+}
+
+
+/**
+ * Called via glBindFramebufferEXT().
+ */
+static void
+intel_bind_framebuffer(struct gl_context * ctx, GLenum target,
+ struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
+{
+ if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
+ intel_draw_buffer(ctx);
+ }
+ else {
+ /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
+ }
+}
+
+
+/**
+ * Called via glFramebufferRenderbufferEXT().
+ */
+static void
+intel_framebuffer_renderbuffer(struct gl_context * ctx,
+ struct gl_framebuffer *fb,
+ GLenum attachment, struct gl_renderbuffer *rb)
+{
+ DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);
+
+ _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
+ intel_draw_buffer(ctx);
+}
+
+/**
+ * \par Special case for separate stencil
+ *
+ * When wrapping a depthstencil texture that uses separate stencil, this
+ * function is recursively called twice: once to create \c
+ * irb->wrapped_depth and again to create \c irb->wrapped_stencil. On the
+ * call to create \c irb->wrapped_depth, the \c format and \c
+ * internal_format parameters do not match \c mt->format. In that case, \c
+ * mt->format is MESA_FORMAT_S8_Z24 and \c format is \c
+ * MESA_FORMAT_X8_Z24.
+ *
+ * @return true on success
+ */
+
+static bool
+intel_renderbuffer_update_wrapper(struct intel_context *intel,
+ struct intel_renderbuffer *irb,
+ struct gl_texture_image *image,
+ uint32_t layer)
+{
+ struct gl_renderbuffer *rb = &irb->Base.Base;
+ struct intel_texture_image *intel_image = intel_texture_image(image);
+ struct intel_mipmap_tree *mt = intel_image->mt;
+ int level = image->Level;
+
+ rb->Format = image->TexFormat;
+ rb->InternalFormat = image->InternalFormat;
+ rb->_BaseFormat = image->_BaseFormat;
+ rb->Width = mt->level[level].width;
+ rb->Height = mt->level[level].height;
+
+ rb->Delete = intel_delete_renderbuffer;
+ rb->AllocStorage = intel_nop_alloc_storage;
+
+ intel_miptree_check_level_layer(mt, level, layer);
+ irb->mt_level = level;
+ irb->mt_layer = layer;
+
+ intel_miptree_reference(&irb->mt, mt);
+
+ intel_renderbuffer_set_draw_offset(irb);
+
+ if (mt->hiz_mt == NULL &&
+ intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
+ intel_miptree_alloc_hiz(intel, mt, 0 /* num_samples */);
+ if (!mt->hiz_mt)
+ return false;
+ }
+
+ return true;
+}
+
+void
+intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
+{
+ unsigned int dst_x, dst_y;
+
+ /* compute offset of the particular 2D image within the texture region */
+ intel_miptree_get_image_offset(irb->mt,
+ irb->mt_level,
+ 0, /* face, which we ignore */
+ irb->mt_layer,
+ &dst_x, &dst_y);
+
+ irb->draw_x = dst_x;
+ irb->draw_y = dst_y;
+}
+
+/**
+ * Rendering to tiled buffers requires that the base address of the
+ * buffer be aligned to a page boundary. We generally render to
+ * textures by pointing the surface at the mipmap image level, which
+ * may not be aligned to a tile boundary.
+ *
+ * This function returns an appropriately-aligned base offset
+ * according to the tiling restrictions, plus any required x/y offset
+ * from there.
+ */
+uint32_t
+intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb,
+ uint32_t *tile_x,
+ uint32_t *tile_y)
+{
+ struct intel_region *region = irb->mt->region;
+ uint32_t mask_x, mask_y;
+
+ intel_region_get_tile_masks(region, &mask_x, &mask_y, false);
+
+ *tile_x = irb->draw_x & mask_x;
+ *tile_y = irb->draw_y & mask_y;
+ return intel_region_get_aligned_offset(region, irb->draw_x & ~mask_x,
+ irb->draw_y & ~mask_y, false);
+}
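+
+/* Worked example of the mask math above (hypothetical numbers): for an
+ * X-tiled 32bpp region, intel_region_get_tile_masks() yields mask_x = 127
+ * (512-byte tile rows / 4 cpp - 1) and mask_y = 7. With draw_x/draw_y =
+ * (150, 20):
+ *
+ *    *tile_x = 150 & 127;   // = 22
+ *    *tile_y =  20 & 7;     // = 4
+ *    offset  = aligned offset of (150 & ~127, 20 & ~7) = (128, 16)
+ *
+ * so the surface base points at the tile-aligned (128, 16) and the residual
+ * (22, 4) is programmed as the intra-tile x/y offset.
+ */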
+
+/**
+ * Called by glFramebufferTexture[123]DEXT() (and other places) to
+ * prepare for rendering into texture memory. This might be called
+ * many times to choose different texture levels, cube faces, etc
+ * before intel_finish_render_texture() is ever called.
+ */
+static void
+intel_render_texture(struct gl_context * ctx,
+ struct gl_framebuffer *fb,
+ struct gl_renderbuffer_attachment *att)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct gl_texture_image *image = _mesa_get_attachment_teximage(att);
+ struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
+ struct intel_texture_image *intel_image = intel_texture_image(image);
+ struct intel_mipmap_tree *mt = intel_image->mt;
+ int layer;
+
+ (void) fb;
+
+ if (att->CubeMapFace > 0) {
+ assert(att->Zoffset == 0);
+ layer = att->CubeMapFace;
+ } else {
+ layer = att->Zoffset;
+ }
+
+ if (!intel_image->mt) {
+ /* Fallback on drawing to a texture that doesn't have a miptree
+ * (has a border, width/height 0, etc.)
+ */
+ _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
+ _swrast_render_texture(ctx, fb, att);
+ return;
+ }
+ else if (!irb) {
+ intel_miptree_check_level_layer(mt, att->TextureLevel, layer);
+
+ irb = (struct intel_renderbuffer *)intel_new_renderbuffer(ctx, ~0);
+
+ if (irb) {
+ /* bind the wrapper to the attachment point */
+ _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base.Base);
+ }
+ else {
+ /* fallback to software rendering */
+ _swrast_render_texture(ctx, fb, att);
+ return;
+ }
+ }
+
+ if (!intel_renderbuffer_update_wrapper(intel, irb, image, layer)) {
+ _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
+ _swrast_render_texture(ctx, fb, att);
+ return;
+ }
+
+ irb->tex_image = image;
+
+ DBG("Begin render %s texture tex=%u w=%d h=%d refcount=%d\n",
+ _mesa_get_format_name(image->TexFormat),
+ att->Texture->Name, image->Width, image->Height,
+ irb->Base.Base.RefCount);
+
+ /* update drawing region, etc */
+ intel_draw_buffer(ctx);
+}
+
+
+/**
+ * Called by Mesa when rendering to a texture is done.
+ */
+static void
+intel_finish_render_texture(struct gl_context * ctx,
+ struct gl_renderbuffer_attachment *att)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct gl_texture_object *tex_obj = att->Texture;
+ struct gl_texture_image *image =
+ tex_obj->Image[att->CubeMapFace][att->TextureLevel];
+ struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
+
+ DBG("Finish render %s texture tex=%u\n",
+ _mesa_get_format_name(image->TexFormat), att->Texture->Name);
+
+ if (irb)
+ irb->tex_image = NULL;
+
+ /* Since we've (probably) rendered to the texture and will (likely) use
+ * it in the texture domain later on in this batchbuffer, flush the
+ * batch. Once again, we wish for a domain tracker in libdrm to cover
+ * usage inside of a batchbuffer like GEM does in the kernel.
+ */
+ intel_batchbuffer_emit_mi_flush(intel);
+}
+
+/**
+ * Do additional "completeness" testing of a framebuffer object.
+ */
+static void
+intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
+{
+ struct intel_context *intel = intel_context(ctx);
+ const struct intel_renderbuffer *depthRb =
+ intel_get_renderbuffer(fb, BUFFER_DEPTH);
+ const struct intel_renderbuffer *stencilRb =
+ intel_get_renderbuffer(fb, BUFFER_STENCIL);
+ struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL;
+ int i;
+
+ DBG("%s() on fb %p (%s)\n", __FUNCTION__,
+ fb, (fb == ctx->DrawBuffer ? "drawbuffer" :
+ (fb == ctx->ReadBuffer ? "readbuffer" : "other buffer")));
+
+ if (depthRb)
+ depth_mt = depthRb->mt;
+ if (stencilRb) {
+ stencil_mt = stencilRb->mt;
+ if (stencil_mt->stencil_mt)
+ stencil_mt = stencil_mt->stencil_mt;
+ }
+
+ if (depth_mt && stencil_mt) {
+ if (depth_mt == stencil_mt) {
+ /* For true packed depth/stencil (not faked on prefers-separate-stencil
+ * hardware) we need to be sure they're the same level/layer, since
+ * we'll be emitting a single packet describing the packed setup.
+ */
+ if (depthRb->mt_level != stencilRb->mt_level ||
+ depthRb->mt_layer != stencilRb->mt_layer) {
+ DBG("depth image level/layer %d/%d != stencil image %d/%d\n",
+ depthRb->mt_level,
+ depthRb->mt_layer,
+ stencilRb->mt_level,
+ stencilRb->mt_layer);
+ fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ }
+ } else {
+ if (!intel->has_separate_stencil) {
+ DBG("separate stencil unsupported\n");
+ fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ }
+ if (stencil_mt->format != MESA_FORMAT_S8) {
+ DBG("separate stencil is %s instead of S8\n",
+ _mesa_get_format_name(stencil_mt->format));
+ fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ }
+ if (intel->gen < 7 && depth_mt->hiz_mt == NULL) {
+ /* Before Gen7, separate depth and stencil buffers can be used
+ * only if HiZ is enabled. From the Sandybridge PRM, Volume 2,
+ * Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable:
+ * [DevSNB]: This field must be set to the same value (enabled
+ * or disabled) as Hierarchical Depth Buffer Enable.
+ */
+ DBG("separate stencil without HiZ\n");
+ fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED;
+ }
+ }
+ }
+
+ for (i = 0; i < Elements(fb->Attachment); i++) {
+ struct gl_renderbuffer *rb;
+ struct intel_renderbuffer *irb;
+
+ if (fb->Attachment[i].Type == GL_NONE)
+ continue;
+
+ /* A supported attachment will have a Renderbuffer set either
+ * from being a Renderbuffer or being a texture that got the
+ * intel_wrap_texture() treatment.
+ */
+ rb = fb->Attachment[i].Renderbuffer;
+ if (rb == NULL) {
+ DBG("attachment without renderbuffer\n");
+ fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ continue;
+ }
+
+ if (fb->Attachment[i].Type == GL_TEXTURE) {
+ const struct gl_texture_image *img =
+ _mesa_get_attachment_teximage_const(&fb->Attachment[i]);
+
+ if (img->Border) {
+ DBG("texture with border\n");
+ fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ continue;
+ }
+ }
+
+ irb = intel_renderbuffer(rb);
+ if (irb == NULL) {
+ DBG("software rendering renderbuffer\n");
+ fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ continue;
+ }
+
+ if (!intel->vtbl.render_target_supported(intel, rb)) {
+ DBG("Unsupported HW texture/renderbuffer format attached: %s\n",
+ _mesa_get_format_name(intel_rb_format(irb)));
+ fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ }
+ }
+}
+
+/**
+ * Try to do a glBlitFramebuffer using glCopyTexSubImage2D
+ * We can do this when the dst renderbuffer is actually a texture and
+ * there is no scaling, mirroring or scissoring.
+ *
+ * \return new buffer mask indicating the buffers left to blit using the
+ * normal path.
+ */
+static GLbitfield
+intel_blit_framebuffer_copy_tex_sub_image(struct gl_context *ctx,
+ GLint srcX0, GLint srcY0,
+ GLint srcX1, GLint srcY1,
+ GLint dstX0, GLint dstY0,
+ GLint dstX1, GLint dstY1,
+ GLbitfield mask, GLenum filter)
+{
+ if (mask & GL_COLOR_BUFFER_BIT) {
+ const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
+ const struct gl_framebuffer *readFb = ctx->ReadBuffer;
+ const struct gl_renderbuffer_attachment *drawAtt =
+ &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]];
+ struct intel_renderbuffer *srcRb =
+ intel_renderbuffer(readFb->_ColorReadBuffer);
+
+      /* If the source and destination are the same size, there is no
+       * mirroring, the rectangles are within the bounds of the texture,
+       * and there is no scissor, then we can use glCopyTexSubImage2D to
+       * implement the blit. This will end up as a fast hardware blit on
+       * some drivers.
+       */
+ if (srcRb && drawAtt && drawAtt->Texture &&
+ srcX0 - srcX1 == dstX0 - dstX1 &&
+ srcY0 - srcY1 == dstY0 - dstY1 &&
+ srcX1 >= srcX0 &&
+ srcY1 >= srcY0 &&
+ srcX0 >= 0 && srcX1 <= readFb->Width &&
+ srcY0 >= 0 && srcY1 <= readFb->Height &&
+ dstX0 >= 0 && dstX1 <= drawFb->Width &&
+ dstY0 >= 0 && dstY1 <= drawFb->Height &&
+ !ctx->Scissor.Enabled) {
+ const struct gl_texture_object *texObj = drawAtt->Texture;
+ const GLuint dstLevel = drawAtt->TextureLevel;
+ const GLenum target = texObj->Target;
+
+ struct gl_texture_image *texImage =
+ _mesa_select_tex_image(ctx, texObj, target, dstLevel);
+
+ if (intel_copy_texsubimage(intel_context(ctx),
+ intel_texture_image(texImage),
+ dstX0, dstY0,
+ srcRb,
+ srcX0, srcY0,
+ srcX1 - srcX0, /* width */
+ srcY1 - srcY0))
+ mask &= ~GL_COLOR_BUFFER_BIT;
+ }
+ }
+
+ return mask;
+}
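+
+/* Example (a sketch, assuming the draw framebuffer is texture-backed): an
+ * unscaled, unmirrored, unscissored 1:1 blit such as
+ *
+ *    glBlitFramebuffer(0, 0, 256, 256,    // src rect
+ *                      0, 0, 256, 256,    // dst rect, same size
+ *                      GL_COLOR_BUFFER_BIT, GL_NEAREST);
+ *
+ * takes the fast path and clears GL_COLOR_BUFFER_BIT from the returned mask,
+ * while a stretching or y-flipping blit falls through to the normal path
+ * below.
+ */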
+
+static void
+intel_blit_framebuffer(struct gl_context *ctx,
+ GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
+ GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
+ GLbitfield mask, GLenum filter)
+{
+ /* Try faster, glCopyTexSubImage2D approach first which uses the BLT. */
+ mask = intel_blit_framebuffer_copy_tex_sub_image(ctx,
+ srcX0, srcY0, srcX1, srcY1,
+ dstX0, dstY0, dstX1, dstY1,
+ mask, filter);
+ if (mask == 0x0)
+ return;
+
+#ifndef I915
+ mask = brw_blorp_framebuffer(intel_context(ctx),
+ srcX0, srcY0, srcX1, srcY1,
+ dstX0, dstY0, dstX1, dstY1,
+ mask, filter);
+ if (mask == 0x0)
+ return;
+#endif
+
+ _mesa_meta_BlitFramebuffer(ctx,
+ srcX0, srcY0, srcX1, srcY1,
+ dstX0, dstY0, dstX1, dstY1,
+ mask, filter);
+}
+
+/**
+ * This is a no-op except on multisample buffers shared with DRI2.
+ */
+void
+intel_renderbuffer_set_needs_downsample(struct intel_renderbuffer *irb)
+{
+ if (irb->mt && irb->mt->singlesample_mt)
+ irb->mt->need_downsample = true;
+}
+
+void
+intel_renderbuffer_set_needs_hiz_resolve(struct intel_renderbuffer *irb)
+{
+ if (irb->mt) {
+ intel_miptree_slice_set_needs_hiz_resolve(irb->mt,
+ irb->mt_level,
+ irb->mt_layer);
+ }
+}
+
+void
+intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer *irb)
+{
+ if (irb->mt) {
+ intel_miptree_slice_set_needs_depth_resolve(irb->mt,
+ irb->mt_level,
+ irb->mt_layer);
+ }
+}
+
+bool
+intel_renderbuffer_resolve_hiz(struct intel_context *intel,
+ struct intel_renderbuffer *irb)
+{
+ if (irb->mt)
+ return intel_miptree_slice_resolve_hiz(intel,
+ irb->mt,
+ irb->mt_level,
+ irb->mt_layer);
+
+ return false;
+}
+
+bool
+intel_renderbuffer_resolve_depth(struct intel_context *intel,
+ struct intel_renderbuffer *irb)
+{
+ if (irb->mt)
+ return intel_miptree_slice_resolve_depth(intel,
+ irb->mt,
+ irb->mt_level,
+ irb->mt_layer);
+
+ return false;
+}
+
+/**
+ * Do one-time context initializations related to GL_EXT_framebuffer_object.
+ * Hook in device driver functions.
+ */
+void
+intel_fbo_init(struct intel_context *intel)
+{
+ intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
+ intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
+ intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer;
+ intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer;
+ intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
+ intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
+ intel->ctx.Driver.RenderTexture = intel_render_texture;
+ intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
+ intel->ctx.Driver.ResizeBuffers = intel_resize_buffers;
+ intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer;
+ intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer;
+
+#if FEATURE_OES_EGL_image
+ intel->ctx.Driver.EGLImageTargetRenderbufferStorage =
+ intel_image_target_renderbuffer_storage;
+#endif
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_mipmap_tree.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_mipmap_tree.c
index 242fed0b6ae..556a82fc7ff 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_mipmap_tree.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_mipmap_tree.c
@@ -1 +1,1711 @@
-../intel/intel_mipmap_tree.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <GL/gl.h>
+#include <GL/internal/dri_interface.h>
+
+#include "intel_batchbuffer.h"
+#include "intel_context.h"
+#include "intel_mipmap_tree.h"
+#include "intel_regions.h"
+#include "intel_resolve_map.h"
+#include "intel_span.h"
+#include "intel_tex_layout.h"
+#include "intel_tex.h"
+#include "intel_blit.h"
+
+#ifndef I915
+#include "brw_blorp.h"
+#endif
+
+#include "main/enums.h"
+#include "main/formats.h"
+#include "main/glformats.h"
+#include "main/texcompress_etc.h"
+#include "main/teximage.h"
+
+#define FILE_DEBUG_FLAG DEBUG_MIPTREE
+
+static GLenum
+target_to_target(GLenum target)
+{
+ switch (target) {
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
+ return GL_TEXTURE_CUBE_MAP_ARB;
+ default:
+ return target;
+ }
+}
+
+/**
+ * @param for_region Indicates that the caller is
+ * intel_miptree_create_for_region(). If true, then do not create
+ * \c stencil_mt.
+ */
+static struct intel_mipmap_tree *
+intel_miptree_create_internal(struct intel_context *intel,
+ GLenum target,
+ gl_format format,
+ GLuint first_level,
+ GLuint last_level,
+ GLuint width0,
+ GLuint height0,
+ GLuint depth0,
+ bool for_region,
+ GLuint num_samples,
+ enum intel_msaa_layout msaa_layout)
+{
+ struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
+ int compress_byte = 0;
+
+ DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr(target),
+ _mesa_get_format_name(format),
+ first_level, last_level, mt);
+
+ if (_mesa_is_format_compressed(format))
+ compress_byte = intel_compressed_num_bytes(format);
+
+ mt->target = target_to_target(target);
+ mt->format = format;
+ mt->first_level = first_level;
+ mt->last_level = last_level;
+ mt->width0 = width0;
+ mt->height0 = height0;
+ mt->cpp = compress_byte ? compress_byte : _mesa_get_format_bytes(mt->format);
+ mt->num_samples = num_samples;
+ mt->compressed = compress_byte ? 1 : 0;
+ mt->msaa_layout = msaa_layout;
+ mt->refcount = 1;
+
+ /* array_spacing_lod0 is only used for non-IMS MSAA surfaces. TODO: can we
+ * use it elsewhere?
+ */
+ switch (msaa_layout) {
+ case INTEL_MSAA_LAYOUT_NONE:
+ case INTEL_MSAA_LAYOUT_IMS:
+ mt->array_spacing_lod0 = false;
+ break;
+ case INTEL_MSAA_LAYOUT_UMS:
+ case INTEL_MSAA_LAYOUT_CMS:
+ mt->array_spacing_lod0 = true;
+ break;
+ }
+
+ if (target == GL_TEXTURE_CUBE_MAP) {
+ assert(depth0 == 1);
+ mt->depth0 = 6;
+ } else {
+ mt->depth0 = depth0;
+ }
+
+ if (!for_region &&
+ _mesa_is_depthstencil_format(_mesa_get_format_base_format(format)) &&
+ (intel->must_use_separate_stencil ||
+ (intel->has_separate_stencil &&
+ intel->vtbl.is_hiz_depth_format(intel, format)))) {
+ /* MSAA stencil surfaces always use IMS layout. */
+ enum intel_msaa_layout msaa_layout =
+ num_samples > 1 ? INTEL_MSAA_LAYOUT_IMS : INTEL_MSAA_LAYOUT_NONE;
+ mt->stencil_mt = intel_miptree_create(intel,
+ mt->target,
+ MESA_FORMAT_S8,
+ mt->first_level,
+ mt->last_level,
+ mt->width0,
+ mt->height0,
+ mt->depth0,
+ true,
+ num_samples,
+ msaa_layout);
+ if (!mt->stencil_mt) {
+ intel_miptree_release(&mt);
+ return NULL;
+ }
+
+ /* Fix up the Z miptree format for how we're splitting out separate
+ * stencil. Gen7 expects there to be no stencil bits in its depth buffer.
+ */
+ if (mt->format == MESA_FORMAT_S8_Z24) {
+ mt->format = MESA_FORMAT_X8_Z24;
+ } else if (mt->format == MESA_FORMAT_Z32_FLOAT_X24S8) {
+ mt->format = MESA_FORMAT_Z32_FLOAT;
+ mt->cpp = 4;
+ } else {
+ _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
+ _mesa_get_format_name(mt->format));
+ }
+ }
+
+ intel_get_texture_alignment_unit(intel, mt->format,
+ &mt->align_w, &mt->align_h);
+
+#ifdef I915
+ if (intel->is_945)
+ i945_miptree_layout(mt);
+ else
+ i915_miptree_layout(mt);
+#else
+ brw_miptree_layout(intel, mt);
+#endif
+
+ return mt;
+}
+
+
+struct intel_mipmap_tree *
+intel_miptree_create(struct intel_context *intel,
+ GLenum target,
+ gl_format format,
+ GLuint first_level,
+ GLuint last_level,
+ GLuint width0,
+ GLuint height0,
+ GLuint depth0,
+ bool expect_accelerated_upload,
+ GLuint num_samples,
+ enum intel_msaa_layout msaa_layout)
+{
+ struct intel_mipmap_tree *mt;
+ uint32_t tiling = I915_TILING_NONE;
+ GLenum base_format;
+ bool wraps_etc1 = false;
+ GLuint total_width, total_height;
+
+ if (format == MESA_FORMAT_ETC1_RGB8) {
+ format = MESA_FORMAT_RGBX8888_REV;
+ wraps_etc1 = true;
+ }
+
+ base_format = _mesa_get_format_base_format(format);
+
+ if (intel->use_texture_tiling && !_mesa_is_format_compressed(format)) {
+ if (intel->gen >= 4 &&
+ (base_format == GL_DEPTH_COMPONENT ||
+ base_format == GL_DEPTH_STENCIL_EXT))
+ tiling = I915_TILING_Y;
+ else if (msaa_layout != INTEL_MSAA_LAYOUT_NONE) {
+ /* From p82 of the Sandy Bridge PRM, dw3[1] of SURFACE_STATE ("Tiled
+ * Surface"):
+ *
+ * [DevSNB+]: For multi-sample render targets, this field must be
+ * 1. MSRTs can only be tiled.
+ *
+ * Our usual reason for preferring X tiling (fast blits using the
+ * blitting engine) doesn't apply to MSAA, since we'll generally be
+ * downsampling or upsampling when blitting between the MSAA buffer
+ * and another buffer, and the blitting engine doesn't support that.
+ * So use Y tiling, since it makes better use of the cache.
+ */
+ tiling = I915_TILING_Y;
+ } else if (width0 >= 64)
+ tiling = I915_TILING_X;
+ }
+
+ mt = intel_miptree_create_internal(intel, target, format,
+ first_level, last_level, width0,
+ height0, depth0,
+ false, num_samples, msaa_layout);
+   /* total_width == 0 || total_height == 0 indicates the null texture */
+ if (!mt || !mt->total_width || !mt->total_height) {
+ intel_miptree_release(&mt);
+ return NULL;
+ }
+
+ total_width = mt->total_width;
+ total_height = mt->total_height;
+
+ if (format == MESA_FORMAT_S8) {
+ /* The stencil buffer is W tiled. However, we request from the kernel a
+ * non-tiled buffer because the GTT is incapable of W fencing. So round
+ * up the width and height to match the size of W tiles (64x64).
+ */
+ tiling = I915_TILING_NONE;
+ total_width = ALIGN(total_width, 64);
+ total_height = ALIGN(total_height, 64);
+ }
+
+ mt->wraps_etc1 = wraps_etc1;
+ mt->region = intel_region_alloc(intel->intelScreen,
+ tiling,
+ mt->cpp,
+ total_width,
+ total_height,
+ expect_accelerated_upload);
+ mt->offset = 0;
+
+ if (!mt->region) {
+ intel_miptree_release(&mt);
+ return NULL;
+ }
+
+ return mt;
+}
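+
+/* Example of the stencil rounding above (illustrative): a 70x70
+ * MESA_FORMAT_S8 miptree is allocated as a 128x128 untiled region, since
+ * ALIGN(70, 64) == 128, so the allocation always covers whole 64x64 W tiles.
+ */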
+
+
+struct intel_mipmap_tree *
+intel_miptree_create_for_region(struct intel_context *intel,
+ GLenum target,
+ gl_format format,
+ struct intel_region *region)
+{
+ struct intel_mipmap_tree *mt;
+
+ mt = intel_miptree_create_internal(intel, target, format,
+ 0, 0,
+ region->width, region->height, 1,
+ true, 0 /* num_samples */,
+ INTEL_MSAA_LAYOUT_NONE);
+ if (!mt)
+ return mt;
+
+ intel_region_reference(&mt->region, region);
+
+ return mt;
+}
+
+/**
+ * Determine which MSAA layout should be used by the MSAA surface being
+ * created, based on the chip generation and the surface type.
+ */
+static enum intel_msaa_layout
+compute_msaa_layout(struct intel_context *intel, gl_format format)
+{
+ /* Prior to Gen7, all MSAA surfaces used IMS layout. */
+ if (intel->gen < 7)
+ return INTEL_MSAA_LAYOUT_IMS;
+
+ /* In Gen7, IMS layout is only used for depth and stencil buffers. */
+ switch (_mesa_get_format_base_format(format)) {
+ case GL_DEPTH_COMPONENT:
+ case GL_STENCIL_INDEX:
+ case GL_DEPTH_STENCIL:
+ return INTEL_MSAA_LAYOUT_IMS;
+ default:
+ /* From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
+ *
+ * This field must be set to 0 for all SINT MSRTs when all RT channels
+ * are not written
+ *
+ * In practice this means that we have to disable MCS for all signed
+ * integer MSAA buffers. The alternative, to disable MCS only when one
+ * of the render target channels is disabled, is impractical because it
+ * would require converting between CMS and UMS MSAA layouts on the fly,
+ * which is expensive.
+ */
+ if (_mesa_get_format_datatype(format) == GL_INT) {
+ /* TODO: is this workaround needed for future chipsets? */
+ assert(intel->gen == 7);
+ return INTEL_MSAA_LAYOUT_UMS;
+ } else {
+ return INTEL_MSAA_LAYOUT_CMS;
+ }
+ }
+}
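+
+/* Summary of the mapping above on Gen7 (illustrative formats):
+ *
+ *    MESA_FORMAT_X8_Z24       depth          -> INTEL_MSAA_LAYOUT_IMS
+ *    MESA_FORMAT_RGBA8888_REV unorm color    -> INTEL_MSAA_LAYOUT_CMS
+ *    MESA_FORMAT_RGBA_INT32   signed integer -> INTEL_MSAA_LAYOUT_UMS
+ *
+ * Before Gen7 everything is INTEL_MSAA_LAYOUT_IMS.
+ */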
+
+/**
+ * For a singlesample DRI2 buffer, this simply wraps the given region with a miptree.
+ *
+ * For a multisample DRI2 buffer, this wraps the given region with
+ * a singlesample miptree, then creates a multisample miptree into which the
+ * singlesample miptree is embedded as a child.
+ */
+struct intel_mipmap_tree*
+intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
+ unsigned dri_attachment,
+ gl_format format,
+ uint32_t num_samples,
+ struct intel_region *region)
+{
+ struct intel_mipmap_tree *singlesample_mt = NULL;
+ struct intel_mipmap_tree *multisample_mt = NULL;
+ GLenum base_format = _mesa_get_format_base_format(format);
+
+ /* Only the front and back buffers, which are color buffers, are shared
+ * through DRI2.
+ */
+ assert(dri_attachment == __DRI_BUFFER_BACK_LEFT ||
+ dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
+ dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT);
+ assert(base_format == GL_RGB || base_format == GL_RGBA);
+
+ singlesample_mt = intel_miptree_create_for_region(intel, GL_TEXTURE_2D,
+ format, region);
+ if (!singlesample_mt)
+ return NULL;
+
+ if (num_samples == 0)
+ return singlesample_mt;
+
+ multisample_mt = intel_miptree_create_for_renderbuffer(intel,
+ format,
+ region->width,
+ region->height,
+ num_samples);
+ if (!multisample_mt) {
+ intel_miptree_release(&singlesample_mt);
+ return NULL;
+ }
+
+ multisample_mt->singlesample_mt = singlesample_mt;
+ multisample_mt->need_downsample = false;
+
+ if (intel->is_front_buffer_rendering &&
+ (dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
+ dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT)) {
+ intel_miptree_upsample(intel, multisample_mt);
+ }
+
+ return multisample_mt;
+}
+
+struct intel_mipmap_tree*
+intel_miptree_create_for_renderbuffer(struct intel_context *intel,
+ gl_format format,
+ uint32_t width,
+ uint32_t height,
+ uint32_t num_samples)
+{
+ struct intel_mipmap_tree *mt;
+ uint32_t depth = 1;
+ enum intel_msaa_layout msaa_layout = INTEL_MSAA_LAYOUT_NONE;
+ const uint32_t singlesample_width = width;
+ const uint32_t singlesample_height = height;
+ bool ok;
+
+ if (num_samples > 1) {
+ /* Adjust width/height/depth for MSAA */
+ msaa_layout = compute_msaa_layout(intel, format);
+ if (msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
+ /* In the Sandy Bridge PRM, volume 4, part 1, page 31, it says:
+ *
+ * "Any of the other messages (sample*, LOD, load4) used with a
+ * (4x) multisampled surface will in-effect sample a surface with
+ * double the height and width as that indicated in the surface
+ * state. Each pixel position on the original-sized surface is
+ * replaced with a 2x2 of samples with the following arrangement:
+ *
+ * sample 0 sample 2
+ * sample 1 sample 3"
+ *
+ * Thus, when sampling from a multisampled texture, it behaves as
+ * though the layout in memory for (x,y,sample) is:
+ *
+ * (0,0,0) (0,0,2) (1,0,0) (1,0,2)
+ * (0,0,1) (0,0,3) (1,0,1) (1,0,3)
+ *
+ * (0,1,0) (0,1,2) (1,1,0) (1,1,2)
+ * (0,1,1) (0,1,3) (1,1,1) (1,1,3)
+ *
+ * However, the actual layout of multisampled data in memory is:
+ *
+ * (0,0,0) (1,0,0) (0,0,1) (1,0,1)
+ * (0,1,0) (1,1,0) (0,1,1) (1,1,1)
+ *
+ * (0,0,2) (1,0,2) (0,0,3) (1,0,3)
+ * (0,1,2) (1,1,2) (0,1,3) (1,1,3)
+ *
+ * This pattern repeats for each 2x2 pixel block.
+ *
+ * As a result, when calculating the size of our 4-sample buffer for
+ * an odd width or height, we have to align before scaling up because
+ * sample 3 is in that bottom right 2x2 block.
+ */
+ switch (num_samples) {
+ case 4:
+ width = ALIGN(width, 2) * 2;
+ height = ALIGN(height, 2) * 2;
+ break;
+ case 8:
+ width = ALIGN(width, 2) * 4;
+ height = ALIGN(height, 2) * 2;
+ break;
+ default:
+ /* num_samples should already have been quantized to 0, 1, 4, or
+ * 8.
+ */
+ assert(false);
+ }
+ } else {
+ /* Non-interleaved */
+ depth = num_samples;
+ }
+ }
+
+ mt = intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
+ width, height, depth, true, num_samples,
+ msaa_layout);
+ if (!mt)
+ goto fail;
+
+ if (intel->vtbl.is_hiz_depth_format(intel, format)) {
+ ok = intel_miptree_alloc_hiz(intel, mt, num_samples);
+ if (!ok)
+ goto fail;
+ }
+
+ if (mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
+ ok = intel_miptree_alloc_mcs(intel, mt, num_samples);
+ if (!ok)
+ goto fail;
+ }
+
+ mt->singlesample_width0 = singlesample_width;
+ mt->singlesample_height0 = singlesample_height;
+
+ return mt;
+
+fail:
+ intel_miptree_release(&mt);
+ return NULL;
+}
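+
+/* Worked example of the IMS adjustment above (illustrative): a 5x3
+ * renderbuffer at 4x MSAA is physically allocated as
+ *
+ *    width  = ALIGN(5, 2) * 2 = 12
+ *    height = ALIGN(3, 2) * 2 = 8
+ *
+ * while the logical 5x3 size is preserved in singlesample_width0/height0.
+ */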
+
+void
+intel_miptree_reference(struct intel_mipmap_tree **dst,
+ struct intel_mipmap_tree *src)
+{
+ if (*dst == src)
+ return;
+
+ intel_miptree_release(dst);
+
+ if (src) {
+ src->refcount++;
+ DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
+ }
+
+ *dst = src;
+}
+
+
+void
+intel_miptree_release(struct intel_mipmap_tree **mt)
+{
+ if (!*mt)
+ return;
+
+ DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
+ if (--(*mt)->refcount <= 0) {
+ GLuint i;
+
+ DBG("%s deleting %p\n", __FUNCTION__, *mt);
+
+ intel_region_release(&((*mt)->region));
+ intel_miptree_release(&(*mt)->stencil_mt);
+ intel_miptree_release(&(*mt)->hiz_mt);
+ intel_miptree_release(&(*mt)->mcs_mt);
+ intel_miptree_release(&(*mt)->singlesample_mt);
+ intel_resolve_map_clear(&(*mt)->hiz_map);
+
+ for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
+ free((*mt)->level[i].slice);
+ }
+
+ free(*mt);
+ }
+ *mt = NULL;
+}
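+
+/* Typical ownership pattern (a sketch, not driver code): holders take and
+ * drop references symmetrically, and the tree is freed when the last
+ * reference goes away:
+ *
+ *    struct intel_mipmap_tree *mine = NULL;
+ *    intel_miptree_reference(&mine, mt);  // refcount++
+ *    ... use mine ...
+ *    intel_miptree_release(&mine);        // refcount--, frees at zero and
+ *                                         // sets mine = NULL
+ */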
+
+void
+intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
+ int *width, int *height, int *depth)
+{
+ switch (image->TexObject->Target) {
+ case GL_TEXTURE_1D_ARRAY:
+ *width = image->Width;
+ *height = 1;
+ *depth = image->Height;
+ break;
+ default:
+ *width = image->Width;
+ *height = image->Height;
+ *depth = image->Depth;
+ break;
+ }
+}
+
+/**
+ * Can the image be pulled into a unified mipmap tree? This mirrors
+ * the completeness test in a lot of ways.
+ *
+ * Not sure whether I want to pass gl_texture_image here.
+ */
+bool
+intel_miptree_match_image(struct intel_mipmap_tree *mt,
+ struct gl_texture_image *image)
+{
+ struct intel_texture_image *intelImage = intel_texture_image(image);
+ GLuint level = intelImage->base.Base.Level;
+ int width, height, depth;
+
+ if (target_to_target(image->TexObject->Target) != mt->target)
+ return false;
+
+ if (image->TexFormat != mt->format &&
+ !(image->TexFormat == MESA_FORMAT_S8_Z24 &&
+ mt->format == MESA_FORMAT_X8_Z24 &&
+ mt->stencil_mt)) {
+ return false;
+ }
+
+ intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);
+
+ if (mt->target == GL_TEXTURE_CUBE_MAP)
+ depth = 6;
+
+ /* Test image dimensions against the base level image adjusted for
+ * minification. This will also catch images not present in the
+ * tree, changed targets, etc.
+ */
+ if (width != mt->level[level].width ||
+ height != mt->level[level].height ||
+ depth != mt->level[level].depth)
+ return false;
+
+ return true;
+}
+
+
+void
+intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
+ GLuint level,
+ GLuint x, GLuint y,
+ GLuint w, GLuint h, GLuint d)
+{
+ mt->level[level].width = w;
+ mt->level[level].height = h;
+ mt->level[level].depth = d;
+ mt->level[level].level_x = x;
+ mt->level[level].level_y = y;
+
+ DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
+ level, w, h, d, x, y);
+
+ assert(mt->level[level].slice == NULL);
+
+ mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
+ mt->level[level].slice[0].x_offset = mt->level[level].level_x;
+ mt->level[level].slice[0].y_offset = mt->level[level].level_y;
+}
+
+
+void
+intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
+ GLuint level, GLuint img,
+ GLuint x, GLuint y)
+{
+ if (img == 0 && level == 0)
+ assert(x == 0 && y == 0);
+
+ assert(img < mt->level[level].depth);
+
+ mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
+ mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;
+
+ DBG("%s level %d img %d pos %d,%d\n",
+ __FUNCTION__, level, img,
+ mt->level[level].slice[img].x_offset,
+ mt->level[level].slice[img].y_offset);
+}
+
+
+/**
+ * For cube map textures, the face can be selected either with the \c face
+ * parameter or, by treating each face as a depth layer, with the \c layer
+ * parameter.
+ */
+void
+intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
+ GLuint level, GLuint face, GLuint layer,
+ GLuint *x, GLuint *y)
+{
+ int slice;
+
+ if (face > 0) {
+ assert(mt->target == GL_TEXTURE_CUBE_MAP);
+ assert(face < 6);
+ assert(layer == 0);
+ slice = face;
+ } else {
+ /* This branch may be taken even if the texture target is a cube map. In
+ * that case, the caller chose to interpret each cube face as a layer.
+ */
+ assert(face == 0);
+ slice = layer;
+ }
+
+ *x = mt->level[level].slice[slice].x_offset;
+ *y = mt->level[level].slice[slice].y_offset;
+}
+
+static void
+intel_miptree_copy_slice(struct intel_context *intel,
+ struct intel_mipmap_tree *dst_mt,
+ struct intel_mipmap_tree *src_mt,
+ int level,
+ int face,
+ int depth)
+{
+ gl_format format = src_mt->format;
+ uint32_t width = src_mt->level[level].width;
+ uint32_t height = src_mt->level[level].height;
+
+ assert(depth < src_mt->level[level].depth);
+
+ if (dst_mt->compressed) {
+ height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
+ width = ALIGN(width, dst_mt->align_w);
+ }
+
+ uint32_t dst_x, dst_y, src_x, src_y;
+ intel_miptree_get_image_offset(dst_mt, level, face, depth,
+ &dst_x, &dst_y);
+ intel_miptree_get_image_offset(src_mt, level, face, depth,
+ &src_x, &src_y);
+
+ DBG("validate blit mt %p %d,%d/%d -> mt %p %d,%d/%d (%dx%d)\n",
+ src_mt, src_x, src_y, src_mt->region->pitch * src_mt->region->cpp,
+ dst_mt, dst_x, dst_y, dst_mt->region->pitch * dst_mt->region->cpp,
+ width, height);
+
+ if (!intelEmitCopyBlit(intel,
+ dst_mt->region->cpp,
+ src_mt->region->pitch, src_mt->region->bo,
+ 0, src_mt->region->tiling,
+ dst_mt->region->pitch, dst_mt->region->bo,
+ 0, dst_mt->region->tiling,
+ src_x, src_y,
+ dst_x, dst_y,
+ width, height,
+ GL_COPY)) {
+
+ fallback_debug("miptree validate blit for %s failed\n",
+ _mesa_get_format_name(format));
+ void *dst = intel_region_map(intel, dst_mt->region, GL_MAP_WRITE_BIT);
+ void *src = intel_region_map(intel, src_mt->region, GL_MAP_READ_BIT);
+
+ _mesa_copy_rect(dst,
+ dst_mt->cpp,
+ dst_mt->region->pitch,
+ dst_x, dst_y,
+ width, height,
+ src, src_mt->region->pitch,
+ src_x, src_y);
+
+ intel_region_unmap(intel, dst_mt->region);
+ intel_region_unmap(intel, src_mt->region);
+ }
+
+ if (src_mt->stencil_mt) {
+ intel_miptree_copy_slice(intel,
+ dst_mt->stencil_mt, src_mt->stencil_mt,
+ level, face, depth);
+ }
+}
+
+/**
+ * Copies the image's current data to the given miptree, and associates that
+ * miptree with the image.
+ */
+void
+intel_miptree_copy_teximage(struct intel_context *intel,
+ struct intel_texture_image *intelImage,
+ struct intel_mipmap_tree *dst_mt)
+{
+ struct intel_mipmap_tree *src_mt = intelImage->mt;
+ int level = intelImage->base.Base.Level;
+ int face = intelImage->base.Base.Face;
+ GLuint depth = intelImage->base.Base.Depth;
+
+ for (int slice = 0; slice < depth; slice++) {
+ intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
+ }
+
+ intel_miptree_reference(&intelImage->mt, dst_mt);
+}
+
+bool
+intel_miptree_alloc_mcs(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ GLuint num_samples)
+{
+ assert(mt->mcs_mt == NULL);
+ assert(intel->gen >= 7); /* MCS only used on Gen7+ */
+
+ /* Choose the correct format for the MCS buffer. All that really matters
+ * is that we allocate the right buffer size, since we'll always be
+ * accessing this miptree using MCS-specific hardware mechanisms, which
+ * infer the correct format based on num_samples.
+ */
+ gl_format format;
+ switch (num_samples) {
+ case 4:
+ /* 8 bits/pixel are required for MCS data when using 4x MSAA (2 bits for
+ * each sample).
+ */
+ format = MESA_FORMAT_R8;
+ break;
+ case 8:
+ /* 32 bits/pixel are required for MCS data when using 8x MSAA (3 bits
+ * for each sample, plus 8 padding bits).
+ */
+ format = MESA_FORMAT_R_UINT32;
+ break;
+ default:
+ assert(!"Unrecognized sample count in intel_miptree_alloc_mcs");
+      return false;
+   }
+
+ /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
+ *
+ * "The MCS surface must be stored as Tile Y."
+ *
+    * We pass INTEL_MSAA_LAYOUT_CMS as the msaa_layout to force
+    * intel_miptree_create() to use Y tiling. The layout is otherwise
+    * ignored for the MCS miptree.
+ */
+ mt->mcs_mt = intel_miptree_create(intel,
+ mt->target,
+ format,
+ mt->first_level,
+ mt->last_level,
+ mt->width0,
+ mt->height0,
+ mt->depth0,
+ true,
+ 0 /* num_samples */,
+ INTEL_MSAA_LAYOUT_CMS);
+
+   if (!mt->mcs_mt)
+      return false;
+
+ /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
+ *
+ * When MCS buffer is enabled and bound to MSRT, it is required that it
+ * is cleared prior to any rendering.
+ *
+ * Since we don't use the MCS buffer for any purpose other than rendering,
+ * it makes sense to just clear it immediately upon allocation.
+ *
+ * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
+ */
+ void *data = intel_region_map(intel, mt->mcs_mt->region, 0);
+ memset(data, 0xff, mt->mcs_mt->region->bo->size);
+ intel_region_unmap(intel, mt->mcs_mt->region);
+
+   return true;
+}
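+
+/* Sanity check of the MCS sizes above (illustrative arithmetic): at 4x,
+ * 2 bits of state per sample * 4 samples = 8 bits/pixel, matching
+ * MESA_FORMAT_R8; at 8x, 3 bits * 8 samples = 24 bits, padded to the 32
+ * bits/pixel of MESA_FORMAT_R_UINT32.
+ */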
+
+bool
+intel_miptree_alloc_hiz(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ GLuint num_samples)
+{
+ assert(mt->hiz_mt == NULL);
+ /* MSAA HiZ surfaces always use IMS layout. */
+ mt->hiz_mt = intel_miptree_create(intel,
+ mt->target,
+ MESA_FORMAT_X8_Z24,
+ mt->first_level,
+ mt->last_level,
+ mt->width0,
+ mt->height0,
+ mt->depth0,
+ true,
+ num_samples,
+ INTEL_MSAA_LAYOUT_IMS);
+
+ if (!mt->hiz_mt)
+ return false;
+
+ /* Mark that all slices need a HiZ resolve. */
+ struct intel_resolve_map *head = &mt->hiz_map;
+ for (int level = mt->first_level; level <= mt->last_level; ++level) {
+ for (int layer = 0; layer < mt->level[level].depth; ++layer) {
+ head->next = malloc(sizeof(*head->next));
+ head->next->prev = head;
+ head->next->next = NULL;
+ head = head->next;
+
+ head->level = level;
+ head->layer = layer;
+ head->need = GEN6_HIZ_OP_HIZ_RESOLVE;
+ }
+ }
+
+ return true;
+}
+
+void
+intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
+ uint32_t level,
+ uint32_t layer)
+{
+ intel_miptree_check_level_layer(mt, level, layer);
+
+ if (!mt->hiz_mt)
+ return;
+
+ intel_resolve_map_set(&mt->hiz_map,
+ level, layer, GEN6_HIZ_OP_HIZ_RESOLVE);
+}
+
+
+void
+intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
+ uint32_t level,
+ uint32_t layer)
+{
+ intel_miptree_check_level_layer(mt, level, layer);
+
+ if (!mt->hiz_mt)
+ return;
+
+ intel_resolve_map_set(&mt->hiz_map,
+ level, layer, GEN6_HIZ_OP_DEPTH_RESOLVE);
+}
+
+static bool
+intel_miptree_slice_resolve(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ uint32_t level,
+ uint32_t layer,
+ enum gen6_hiz_op need)
+{
+ intel_miptree_check_level_layer(mt, level, layer);
+
+ struct intel_resolve_map *item =
+ intel_resolve_map_get(&mt->hiz_map, level, layer);
+
+ if (!item || item->need != need)
+ return false;
+
+ intel_hiz_exec(intel, mt, level, layer, need);
+ intel_resolve_map_remove(item);
+ return true;
+}
+
+bool
+intel_miptree_slice_resolve_hiz(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ uint32_t level,
+ uint32_t layer)
+{
+ return intel_miptree_slice_resolve(intel, mt, level, layer,
+ GEN6_HIZ_OP_HIZ_RESOLVE);
+}
+
+bool
+intel_miptree_slice_resolve_depth(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ uint32_t level,
+ uint32_t layer)
+{
+ return intel_miptree_slice_resolve(intel, mt, level, layer,
+ GEN6_HIZ_OP_DEPTH_RESOLVE);
+}
+
+static bool
+intel_miptree_all_slices_resolve(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ enum gen6_hiz_op need)
+{
+ bool did_resolve = false;
+ struct intel_resolve_map *i, *next;
+
+ for (i = mt->hiz_map.next; i; i = next) {
+ next = i->next;
+ if (i->need != need)
+ continue;
+
+ intel_hiz_exec(intel, mt, i->level, i->layer, need);
+ intel_resolve_map_remove(i);
+ did_resolve = true;
+ }
+
+ return did_resolve;
+}
+
+bool
+intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
+ struct intel_mipmap_tree *mt)
+{
+ return intel_miptree_all_slices_resolve(intel, mt,
+ GEN6_HIZ_OP_HIZ_RESOLVE);
+}
+
+bool
+intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
+ struct intel_mipmap_tree *mt)
+{
+ return intel_miptree_all_slices_resolve(intel, mt,
+ GEN6_HIZ_OP_DEPTH_RESOLVE);
+}
+
+static void
+intel_miptree_updownsample(struct intel_context *intel,
+ struct intel_mipmap_tree *src,
+ struct intel_mipmap_tree *dst,
+ unsigned width,
+ unsigned height)
+{
+#ifndef I915
+ int src_x0 = 0;
+ int src_y0 = 0;
+ int dst_x0 = 0;
+ int dst_y0 = 0;
+
+ intel_miptree_slice_resolve_depth(intel, src, 0, 0);
+ intel_miptree_slice_resolve_depth(intel, dst, 0, 0);
+
+ brw_blorp_blit_miptrees(intel,
+ src, 0 /* level */, 0 /* layer */,
+ dst, 0 /* level */, 0 /* layer */,
+ src_x0, src_y0,
+ dst_x0, dst_y0,
+ width, height,
+ false, false /*mirror x, y*/);
+
+ if (src->stencil_mt) {
+ brw_blorp_blit_miptrees(intel,
+ src->stencil_mt, 0 /* level */, 0 /* layer */,
+ dst->stencil_mt, 0 /* level */, 0 /* layer */,
+ src_x0, src_y0,
+ dst_x0, dst_y0,
+ width, height,
+ false, false /*mirror x, y*/);
+ }
+#endif /* I915 */
+}
+
+static void
+assert_is_flat(struct intel_mipmap_tree *mt)
+{
+ assert(mt->target == GL_TEXTURE_2D);
+ assert(mt->first_level == 0);
+ assert(mt->last_level == 0);
+}
+
+/**
+ * \brief Downsample from mt to mt->singlesample_mt.
+ *
+ * If the miptree needs no downsample, then skip.
+ */
+void
+intel_miptree_downsample(struct intel_context *intel,
+ struct intel_mipmap_tree *mt)
+{
+ /* Only flat, renderbuffer-like miptrees are supported. */
+ assert_is_flat(mt);
+
+ if (!mt->need_downsample)
+ return;
+ intel_miptree_updownsample(intel,
+ mt, mt->singlesample_mt,
+ mt->singlesample_mt->width0,
+ mt->singlesample_mt->height0);
+ mt->need_downsample = false;
+
+ /* Strictly speaking, after a downsample on a depth miptree, a hiz
+ * resolve is needed on the singlesample miptree. However, since the
+ * singlesample miptree is never rendered to, the hiz resolve will never
+ * occur. Therefore we do not mark the needed hiz resolve after
+ * downsampling.
+ */
+}
+
+/**
+ * \brief Upsample from mt->singlesample_mt to mt.
+ *
+ * The upsample is done unconditionally.
+ */
+void
+intel_miptree_upsample(struct intel_context *intel,
+ struct intel_mipmap_tree *mt)
+{
+ /* Only flat, renderbuffer-like miptrees are supported. */
+ assert_is_flat(mt);
+ assert(!mt->need_downsample);
+
+ intel_miptree_updownsample(intel,
+ mt->singlesample_mt, mt,
+ mt->singlesample_mt->width0,
+ mt->singlesample_mt->height0);
+ intel_miptree_slice_set_needs_hiz_resolve(mt, 0, 0);
+}
+
+static void
+intel_miptree_map_gtt(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ struct intel_miptree_map *map,
+ unsigned int level, unsigned int slice)
+{
+ unsigned int bw, bh;
+ void *base;
+ unsigned int image_x, image_y;
+ int x = map->x;
+ int y = map->y;
+
+ /* For compressed formats, the stride is the number of bytes per
+ * row of blocks. intel_miptree_get_image_offset() already does
+ * the divide.
+ */
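+ /* Example: for a 4x4 block-compressed format (bh == 4), a map starting at
+ * pixel row y == 8 lands on block row 2 after the divide below. */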
+ _mesa_get_format_block_size(mt->format, &bw, &bh);
+ assert(y % bh == 0);
+ y /= bh;
+
+ base = intel_region_map(intel, mt->region, map->mode);
+
+ if (base == NULL)
+ map->ptr = NULL;
+ else {
+ /* Note that in the case of cube maps, the caller must have passed the
+ * slice number referencing the face.
+ */
+ intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
+ x += image_x;
+ y += image_y;
+
+ map->stride = mt->region->pitch * mt->cpp;
+ map->ptr = base + y * map->stride + x * mt->cpp;
+ }
+
+ DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
+ map->x, map->y, map->w, map->h,
+ mt, _mesa_get_format_name(mt->format),
+ x, y, map->ptr, map->stride);
+}
+
+static void
+intel_miptree_unmap_gtt(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ struct intel_miptree_map *map,
+ unsigned int level,
+ unsigned int slice)
+{
+ intel_region_unmap(intel, mt->region);
+}
+
+static void
+intel_miptree_map_blit(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ struct intel_miptree_map *map,
+ unsigned int level, unsigned int slice)
+{
+ unsigned int image_x, image_y;
+ int x = map->x;
+ int y = map->y;
+ int ret;
+
+ /* The blitter requires the pitch to be aligned to 4. */
+ map->stride = ALIGN(map->w * mt->region->cpp, 4);
+
+ map->bo = drm_intel_bo_alloc(intel->bufmgr, "intel_miptree_map_blit() temp",
+ map->stride * map->h, 4096);
+ if (!map->bo) {
+ fprintf(stderr, "Failed to allocate blit temporary\n");
+ goto fail;
+ }
+
+ intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
+ x += image_x;
+ y += image_y;
+
+ if (!intelEmitCopyBlit(intel,
+ mt->region->cpp,
+ mt->region->pitch, mt->region->bo,
+ 0, mt->region->tiling,
+ map->stride / mt->region->cpp, map->bo,
+ 0, I915_TILING_NONE,
+ x, y,
+ 0, 0,
+ map->w, map->h,
+ GL_COPY)) {
+ fprintf(stderr, "Failed to blit\n");
+ goto fail;
+ }
+
+ intel_batchbuffer_flush(intel);
+ ret = drm_intel_bo_map(map->bo, (map->mode & GL_MAP_WRITE_BIT) != 0);
+ if (ret) {
+ fprintf(stderr, "Failed to map blit temporary\n");
+ goto fail;
+ }
+
+ map->ptr = map->bo->virtual;
+
+ DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
+ map->x, map->y, map->w, map->h,
+ mt, _mesa_get_format_name(mt->format),
+ x, y, map->ptr, map->stride);
+
+ return;
+
+fail:
+ drm_intel_bo_unreference(map->bo);
+ map->ptr = NULL;
+ map->stride = 0;
+}
+
+static void
+intel_miptree_unmap_blit(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ struct intel_miptree_map *map,
+ unsigned int level,
+ unsigned int slice)
+{
+ assert(!(map->mode & GL_MAP_WRITE_BIT));
+
+ drm_intel_bo_unmap(map->bo);
+ drm_intel_bo_unreference(map->bo);
+}
+
+static void
+intel_miptree_map_s8(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ struct intel_miptree_map *map,
+ unsigned int level, unsigned int slice)
+{
+ map->stride = map->w;
+ map->buffer = map->ptr = malloc(map->stride * map->h);
+ if (!map->buffer)
+ return;
+
+ /* One of either READ_BIT or WRITE_BIT or both is set. READ_BIT implies no
+ * INVALIDATE_RANGE_BIT. WRITE_BIT needs the original values read in unless
+ * invalidate is set, since we'll be writing the whole rectangle from our
+ * temporary buffer back out.
+ */
+ if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
+ uint8_t *untiled_s8_map = map->ptr;
+ uint8_t *tiled_s8_map = intel_region_map(intel, mt->region,
+ GL_MAP_READ_BIT);
+ unsigned int image_x, image_y;
+
+ intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
+
+ for (uint32_t y = 0; y < map->h; y++) {
+ for (uint32_t x = 0; x < map->w; x++) {
+ ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
+ x + image_x + map->x,
+ y + image_y + map->y,
+ intel->has_swizzling);
+ untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
+ }
+ }
+
+ intel_region_unmap(intel, mt->region);
+
+ DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
+ map->x, map->y, map->w, map->h,
+ mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
+ } else {
+ DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
+ map->x, map->y, map->w, map->h,
+ mt, map->ptr, map->stride);
+ }
+}
+
+static void
+intel_miptree_unmap_s8(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ struct intel_miptree_map *map,
+ unsigned int level,
+ unsigned int slice)
+{
+ if (map->mode & GL_MAP_WRITE_BIT) {
+ unsigned int image_x, image_y;
+ uint8_t *untiled_s8_map = map->ptr;
+ uint8_t *tiled_s8_map = intel_region_map(intel, mt->region, map->mode);
+
+ intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
+
+ for (uint32_t y = 0; y < map->h; y++) {
+ for (uint32_t x = 0; x < map->w; x++) {
+ ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
+ x + map->x,
+ y + map->y,
+ intel->has_swizzling);
+ tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
+ }
+ }
+
+ intel_region_unmap(intel, mt->region);
+ }
+
+ free(map->buffer);
+}
+
+static void
+intel_miptree_map_etc1(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ struct intel_miptree_map *map,
+ unsigned int level,
+ unsigned int slice)
+{
+ /* For justification of these invariants,
+ * see intel_mipmap_tree:wraps_etc1.
+ */
+ assert(mt->wraps_etc1);
+ assert(mt->format == MESA_FORMAT_RGBX8888_REV);
+
+ /* From the GL_OES_compressed_ETC1_RGB8_texture spec:
+ * INVALID_OPERATION is generated by CompressedTexSubImage2D,
+ * TexSubImage2D, or CopyTexSubImage2D if the texture image <level>
+ * bound to <target> has internal format ETC1_RGB8_OES.
+ *
+ * This implies that intel_miptree_map_etc1() can only be called from
+ * glCompressedTexImage2D, and hence the assertions below hold.
+ */
+ assert(map->mode & GL_MAP_WRITE_BIT);
+ assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);
+ assert(map->x == 0);
+ assert(map->y == 0);
+
+ /* Each ETC1 block contains 4x4 pixels in 8 bytes. */
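+ /* So a block row spanning map->w pixels is (map->w / 4) * 8 = 2 * map->w
+ * bytes, which is the stride used below. */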
+ map->stride = 2 * map->w;
+ map->buffer = map->ptr = malloc(map->stride * map->h);
+}
+
+static void
+intel_miptree_unmap_etc1(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ struct intel_miptree_map *map,
+ unsigned int level,
+ unsigned int slice)
+{
+ uint32_t image_x;
+ uint32_t image_y;
+ intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
+
+ uint8_t *xbgr = intel_region_map(intel, mt->region, map->mode)
+ + image_y * mt->region->pitch * mt->region->cpp
+ + image_x * mt->region->cpp;
+
+ _mesa_etc1_unpack_rgba8888(xbgr, mt->region->pitch * mt->region->cpp,
+ map->ptr, map->stride,
+ map->w, map->h);
+
+ intel_region_unmap(intel, mt->region);
+ free(map->buffer);
+}
+
+/**
+ * Mapping function for packed depth/stencil miptrees backed by real separate
+ * miptrees for depth and stencil.
+ *
+ * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
+ * separate from the depth buffer. Yet at the GL API level, we have to expose
+ * packed depth/stencil textures and FBO attachments, and Mesa core expects to
+ * be able to map that memory for texture storage and glReadPixels-type
+ * operations. We give Mesa core that access by mallocing a temporary and
+ * copying the data between the actual backing store and the temporary.
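+ *
+ * For the packed 24-bit depth case below, each temporary texel is assembled
+ * as (s << 24) | (z & 0x00ffffff); e.g. stencil 0xff over depth 0x123456
+ * packs to 0xff123456.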
+ */
+static void
+intel_miptree_map_depthstencil(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ struct intel_miptree_map *map,
+ unsigned int level, unsigned int slice)
+{
+ struct intel_mipmap_tree *z_mt = mt;
+ struct intel_mipmap_tree *s_mt = mt->stencil_mt;
+ bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
+ int packed_bpp = map_z32f_x24s8 ? 8 : 4;
+
+ map->stride = map->w * packed_bpp;
+ map->buffer = map->ptr = malloc(map->stride * map->h);
+ if (!map->buffer)
+ return;
+
+ /* One of either READ_BIT or WRITE_BIT or both is set. READ_BIT implies no
+ * INVALIDATE_RANGE_BIT. WRITE_BIT needs the original values read in unless
+ * invalidate is set, since we'll be writing the whole rectangle from our
+ * temporary buffer back out.
+ */
+ if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
+ uint32_t *packed_map = map->ptr;
+ uint8_t *s_map = intel_region_map(intel, s_mt->region, GL_MAP_READ_BIT);
+ uint32_t *z_map = intel_region_map(intel, z_mt->region, GL_MAP_READ_BIT);
+ unsigned int s_image_x, s_image_y;
+ unsigned int z_image_x, z_image_y;
+
+ intel_miptree_get_image_offset(s_mt, level, 0, slice,
+ &s_image_x, &s_image_y);
+ intel_miptree_get_image_offset(z_mt, level, 0, slice,
+ &z_image_x, &z_image_y);
+
+ for (uint32_t y = 0; y < map->h; y++) {
+ for (uint32_t x = 0; x < map->w; x++) {
+ int map_x = map->x + x, map_y = map->y + y;
+ ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
+ map_x + s_image_x,
+ map_y + s_image_y,
+ intel->has_swizzling);
+ ptrdiff_t z_offset = ((map_y + z_image_y) * z_mt->region->pitch +
+ (map_x + z_image_x));
+ uint8_t s = s_map[s_offset];
+ uint32_t z = z_map[z_offset];
+
+ if (map_z32f_x24s8) {
+ packed_map[(y * map->w + x) * 2 + 0] = z;
+ packed_map[(y * map->w + x) * 2 + 1] = s;
+ } else {
+ packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
+ }
+ }
+ }
+
+ intel_region_unmap(intel, s_mt->region);
+ intel_region_unmap(intel, z_mt->region);
+
+ DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
+ __FUNCTION__,
+ map->x, map->y, map->w, map->h,
+ z_mt, map->x + z_image_x, map->y + z_image_y,
+ s_mt, map->x + s_image_x, map->y + s_image_y,
+ map->ptr, map->stride);
+ } else {
+ DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
+ map->x, map->y, map->w, map->h,
+ mt, map->ptr, map->stride);
+ }
+}
+
+static void
+intel_miptree_unmap_depthstencil(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ struct intel_miptree_map *map,
+ unsigned int level,
+ unsigned int slice)
+{
+ struct intel_mipmap_tree *z_mt = mt;
+ struct intel_mipmap_tree *s_mt = mt->stencil_mt;
+ bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
+
+ if (map->mode & GL_MAP_WRITE_BIT) {
+ uint32_t *packed_map = map->ptr;
+ uint8_t *s_map = intel_region_map(intel, s_mt->region, map->mode);
+ uint32_t *z_map = intel_region_map(intel, z_mt->region, map->mode);
+ unsigned int s_image_x, s_image_y;
+ unsigned int z_image_x, z_image_y;
+
+ intel_miptree_get_image_offset(s_mt, level, 0, slice,
+ &s_image_x, &s_image_y);
+ intel_miptree_get_image_offset(z_mt, level, 0, slice,
+ &z_image_x, &z_image_y);
+
+ for (uint32_t y = 0; y < map->h; y++) {
+ for (uint32_t x = 0; x < map->w; x++) {
+ ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
+ x + s_image_x + map->x,
+ y + s_image_y + map->y,
+ intel->has_swizzling);
+ ptrdiff_t z_offset = ((y + z_image_y) * z_mt->region->pitch +
+ (x + z_image_x));
+
+ if (map_z32f_x24s8) {
+ z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
+ s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
+ } else {
+ uint32_t packed = packed_map[y * map->w + x];
+ s_map[s_offset] = packed >> 24;
+ z_map[z_offset] = packed;
+ }
+ }
+ }
+
+ intel_region_unmap(intel, s_mt->region);
+ intel_region_unmap(intel, z_mt->region);
+
+ DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
+ __FUNCTION__,
+ map->x, map->y, map->w, map->h,
+ z_mt, _mesa_get_format_name(z_mt->format),
+ map->x + z_image_x, map->y + z_image_y,
+ s_mt, map->x + s_image_x, map->y + s_image_y,
+ map->ptr, map->stride);
+ }
+
+ free(map->buffer);
+}
+
+/**
+ * Create and attach a map to the miptree at (level, slice). Return the
+ * attached map.
+ */
+static struct intel_miptree_map*
+intel_miptree_attach_map(struct intel_mipmap_tree *mt,
+ unsigned int level,
+ unsigned int slice,
+ unsigned int x,
+ unsigned int y,
+ unsigned int w,
+ unsigned int h,
+ GLbitfield mode)
+{
+ struct intel_miptree_map *map = calloc(1, sizeof(*map));
+
+ if (!map)
+ return NULL;
+
+ assert(mt->level[level].slice[slice].map == NULL);
+ mt->level[level].slice[slice].map = map;
+
+ map->mode = mode;
+ map->x = x;
+ map->y = y;
+ map->w = w;
+ map->h = h;
+
+ return map;
+}
+
+/**
+ * Release the map at (level, slice).
+ */
+static void
+intel_miptree_release_map(struct intel_mipmap_tree *mt,
+ unsigned int level,
+ unsigned int slice)
+{
+ struct intel_miptree_map **map;
+
+ map = &mt->level[level].slice[slice].map;
+ free(*map);
+ *map = NULL;
+}
+
+static void
+intel_miptree_map_singlesample(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ unsigned int level,
+ unsigned int slice,
+ unsigned int x,
+ unsigned int y,
+ unsigned int w,
+ unsigned int h,
+ GLbitfield mode,
+ void **out_ptr,
+ int *out_stride)
+{
+ struct intel_miptree_map *map;
+
+ assert(mt->num_samples <= 1);
+
+ map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
+ if (!map) {
+ *out_ptr = NULL;
+ *out_stride = 0;
+ return;
+ }
+
+ intel_miptree_slice_resolve_depth(intel, mt, level, slice);
+ if (map->mode & GL_MAP_WRITE_BIT) {
+ intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
+ }
+
+ if (mt->format == MESA_FORMAT_S8) {
+ intel_miptree_map_s8(intel, mt, map, level, slice);
+ } else if (mt->wraps_etc1) {
+ intel_miptree_map_etc1(intel, mt, map, level, slice);
+ } else if (mt->stencil_mt) {
+ intel_miptree_map_depthstencil(intel, mt, map, level, slice);
+ } else if (intel->has_llc &&
+ !(mode & GL_MAP_WRITE_BIT) &&
+ !mt->compressed &&
+ mt->region->tiling == I915_TILING_X) {
+ intel_miptree_map_blit(intel, mt, map, level, slice);
+ } else {
+ intel_miptree_map_gtt(intel, mt, map, level, slice);
+ }
+
+ *out_ptr = map->ptr;
+ *out_stride = map->stride;
+
+ if (map->ptr == NULL)
+ intel_miptree_release_map(mt, level, slice);
+}
+
+static void
+intel_miptree_unmap_singlesample(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ unsigned int level,
+ unsigned int slice)
+{
+ struct intel_miptree_map *map = mt->level[level].slice[slice].map;
+
+ assert(mt->num_samples <= 1);
+
+ if (!map)
+ return;
+
+ DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
+ mt, _mesa_get_format_name(mt->format), level, slice);
+
+ if (mt->format == MESA_FORMAT_S8) {
+ intel_miptree_unmap_s8(intel, mt, map, level, slice);
+ } else if (mt->wraps_etc1) {
+ intel_miptree_unmap_etc1(intel, mt, map, level, slice);
+ } else if (mt->stencil_mt) {
+ intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
+ } else if (map->bo) {
+ intel_miptree_unmap_blit(intel, mt, map, level, slice);
+ } else {
+ intel_miptree_unmap_gtt(intel, mt, map, level, slice);
+ }
+
+ intel_miptree_release_map(mt, level, slice);
+}
+
+static void
+intel_miptree_map_multisample(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ unsigned int level,
+ unsigned int slice,
+ unsigned int x,
+ unsigned int y,
+ unsigned int w,
+ unsigned int h,
+ GLbitfield mode,
+ void **out_ptr,
+ int *out_stride)
+{
+ struct intel_miptree_map *map;
+
+ assert(mt->num_samples > 1);
+
+ /* Only flat, renderbuffer-like miptrees are supported. */
+ if (mt->target != GL_TEXTURE_2D ||
+ mt->first_level != 0 ||
+ mt->last_level != 0) {
+ _mesa_problem(&intel->ctx, "attempt to map a multisample miptree for "
+ "which (target, first_level, last_level != "
+ "(GL_TEXTURE_2D, 0, 0)");
+ goto fail;
+ }
+
+ map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
+ if (!map)
+ goto fail;
+
+ if (!mt->singlesample_mt) {
+ mt->singlesample_mt =
+ intel_miptree_create_for_renderbuffer(intel,
+ mt->format,
+ mt->singlesample_width0,
+ mt->singlesample_height0,
+ 0 /*num_samples*/);
+ if (!mt->singlesample_mt)
+ goto fail;
+
+ map->singlesample_mt_is_tmp = true;
+ mt->need_downsample = true;
+ }
+
+ intel_miptree_downsample(intel, mt);
+ intel_miptree_map_singlesample(intel, mt->singlesample_mt,
+ level, slice,
+ x, y, w, h,
+ mode,
+ out_ptr, out_stride);
+ return;
+
+fail:
+ intel_miptree_release_map(mt, level, slice);
+ *out_ptr = NULL;
+ *out_stride = 0;
+}
+
+static void
+intel_miptree_unmap_multisample(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ unsigned int level,
+ unsigned int slice)
+{
+ struct intel_miptree_map *map = mt->level[level].slice[slice].map;
+
+ assert(mt->num_samples > 1);
+
+ if (!map)
+ return;
+
+ intel_miptree_unmap_singlesample(intel, mt->singlesample_mt, level, slice);
+
+ mt->need_downsample = false;
+ if (map->mode & GL_MAP_WRITE_BIT)
+ intel_miptree_upsample(intel, mt);
+
+ if (map->singlesample_mt_is_tmp)
+ intel_miptree_release(&mt->singlesample_mt);
+
+ intel_miptree_release_map(mt, level, slice);
+}
+
+void
+intel_miptree_map(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ unsigned int level,
+ unsigned int slice,
+ unsigned int x,
+ unsigned int y,
+ unsigned int w,
+ unsigned int h,
+ GLbitfield mode,
+ void **out_ptr,
+ int *out_stride)
+{
+ if (mt->num_samples <= 1)
+ intel_miptree_map_singlesample(intel, mt,
+ level, slice,
+ x, y, w, h,
+ mode,
+ out_ptr, out_stride);
+ else
+ intel_miptree_map_multisample(intel, mt,
+ level, slice,
+ x, y, w, h,
+ mode,
+ out_ptr, out_stride);
+}
+
+void
+intel_miptree_unmap(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ unsigned int level,
+ unsigned int slice)
+{
+ if (mt->num_samples <= 1)
+ intel_miptree_unmap_singlesample(intel, mt, level, slice);
+ else
+ intel_miptree_unmap_multisample(intel, mt, level, slice);
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_pixel.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_pixel.c
index d733c5e8745..bc705105d4d 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_pixel.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_pixel.c
@@ -1 +1,167 @@
-../intel/intel_pixel.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/accum.h"
+#include "main/enums.h"
+#include "main/state.h"
+#include "main/bufferobj.h"
+#include "main/context.h"
+#include "swrast/swrast.h"
+
+#include "intel_context.h"
+#include "intel_pixel.h"
+#include "intel_regions.h"
+
+#define FILE_DEBUG_FLAG DEBUG_PIXEL
+
+static GLenum
+effective_func(GLenum func, bool src_alpha_is_one)
+{
+ if (src_alpha_is_one) {
+ if (func == GL_SRC_ALPHA)
+ return GL_ONE;
+ if (func == GL_ONE_MINUS_SRC_ALPHA)
+ return GL_ZERO;
+ }
+
+ return func;
+}
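+
+/* For example, glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) with a
+ * raster color whose alpha is exactly 1.0 degenerates to (GL_ONE, GL_ZERO),
+ * which the blend check below treats the same as blending disabled.
+ */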
+
+/**
+ * Check if any fragment operations are in effect which might affect
+ * glDraw/CopyPixels.
+ */
+bool
+intel_check_blit_fragment_ops(struct gl_context * ctx, bool src_alpha_is_one)
+{
+ if (ctx->NewState)
+ _mesa_update_state(ctx);
+
+ if (ctx->FragmentProgram._Enabled) {
+ DBG("fallback due to fragment program\n");
+ return false;
+ }
+
+ if (ctx->Color.BlendEnabled &&
+ (effective_func(ctx->Color.Blend[0].SrcRGB, src_alpha_is_one) != GL_ONE ||
+ effective_func(ctx->Color.Blend[0].DstRGB, src_alpha_is_one) != GL_ZERO ||
+ ctx->Color.Blend[0].EquationRGB != GL_FUNC_ADD ||
+ effective_func(ctx->Color.Blend[0].SrcA, src_alpha_is_one) != GL_ONE ||
+ effective_func(ctx->Color.Blend[0].DstA, src_alpha_is_one) != GL_ZERO ||
+ ctx->Color.Blend[0].EquationA != GL_FUNC_ADD)) {
+ DBG("fallback due to blend\n");
+ return false;
+ }
+
+ if (ctx->Texture._EnabledUnits) {
+ DBG("fallback due to texturing\n");
+ return false;
+ }
+
+ if (!(ctx->Color.ColorMask[0][0] &&
+ ctx->Color.ColorMask[0][1] &&
+ ctx->Color.ColorMask[0][2] &&
+ ctx->Color.ColorMask[0][3])) {
+ DBG("fallback due to color masking\n");
+ return false;
+ }
+
+ if (ctx->Color.AlphaEnabled) {
+ DBG("fallback due to alpha\n");
+ return false;
+ }
+
+ if (ctx->Depth.Test) {
+ DBG("fallback due to depth test\n");
+ return false;
+ }
+
+ if (ctx->Fog.Enabled) {
+ DBG("fallback due to fog\n");
+ return false;
+ }
+
+ if (ctx->_ImageTransferState) {
+ DBG("fallback due to image transfer\n");
+ return false;
+ }
+
+ if (ctx->Stencil._Enabled) {
+ DBG("fallback due to image stencil\n");
+ return false;
+ }
+
+ if (ctx->RenderMode != GL_RENDER) {
+ DBG("fallback due to render mode\n");
+ return false;
+ }
+
+ return true;
+}
+
+/* The intel_region struct doesn't really do enough to capture the
+ * format of the pixels in the region. For now this code assumes that
+ * the region is a display surface and hence is either ARGB8888 or
+ * RGB565.
+ * XXX FBO: If we'd pass in the intel_renderbuffer instead of region, we'd
+ * know the buffer's pixel format.
+ *
+ * \param format as given to glDraw/ReadPixels
+ * \param type as given to glDraw/ReadPixels
+ */
+bool
+intel_check_blit_format(struct intel_region * region,
+ GLenum format, GLenum type)
+{
+ if (region->cpp == 4 &&
+ (type == GL_UNSIGNED_INT_8_8_8_8_REV ||
+ type == GL_UNSIGNED_BYTE) && format == GL_BGRA) {
+ return true;
+ }
+
+ if (region->cpp == 2 &&
+ type == GL_UNSIGNED_SHORT_5_6_5_REV && format == GL_BGR) {
+ return true;
+ }
+
+ DBG("%s: bad format for blit (cpp %d, type %s format %s)\n",
+ __FUNCTION__, region->cpp,
+ _mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
+
+ return false;
+}
+
+void
+intelInitPixelFuncs(struct dd_function_table *functions)
+{
+ functions->Accum = _mesa_accum;
+ functions->Bitmap = intelBitmap;
+ functions->CopyPixels = intelCopyPixels;
+ functions->DrawPixels = intelDrawPixels;
+ functions->ReadPixels = intelReadPixels;
+}
+
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_pixel_bitmap.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_pixel_bitmap.c
index 9085c7b0397..954dfc50b14 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_pixel_bitmap.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_pixel_bitmap.c
@@ -1 +1,340 @@
-../intel/intel_pixel_bitmap.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/glheader.h"
+#include "main/enums.h"
+#include "main/image.h"
+#include "main/colormac.h"
+#include "main/condrender.h"
+#include "main/mtypes.h"
+#include "main/macros.h"
+#include "main/pbo.h"
+#include "main/bufferobj.h"
+#include "main/state.h"
+#include "main/texobj.h"
+#include "main/context.h"
+#include "main/fbobject.h"
+#include "swrast/swrast.h"
+#include "drivers/common/meta.h"
+
+#include "intel_screen.h"
+#include "intel_context.h"
+#include "intel_batchbuffer.h"
+#include "intel_blit.h"
+#include "intel_regions.h"
+#include "intel_buffers.h"
+#include "intel_pixel.h"
+#include "intel_reg.h"
+
+
+#define FILE_DEBUG_FLAG DEBUG_PIXEL
+
+
+/* Unlike the other intel_pixel_* functions, the expectation here is
+ * that the incoming data is not in a PBO. With the XY_TEXT blit
+ * method, there's no benefit having it in a PBO, but we could
+ * implement a path based on XY_MONO_SRC_COPY_BLIT which might benefit
+ * PBO bitmaps. I think they are probably pretty rare though - I
+ * wonder if Xgl uses them?
+ */
+static const GLubyte *map_pbo( struct gl_context *ctx,
+ GLsizei width, GLsizei height,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLubyte *bitmap )
+{
+ GLubyte *buf;
+
+ if (!_mesa_validate_pbo_access(2, unpack, width, height, 1,
+ GL_COLOR_INDEX, GL_BITMAP,
+ INT_MAX, (const GLvoid *) bitmap)) {
+ _mesa_error(ctx, GL_INVALID_OPERATION,"glBitmap(invalid PBO access)");
+ return NULL;
+ }
+
+ buf = (GLubyte *) ctx->Driver.MapBufferRange(ctx, 0, unpack->BufferObj->Size,
+ GL_MAP_READ_BIT,
+ unpack->BufferObj);
+ if (!buf) {
+ _mesa_error(ctx, GL_INVALID_OPERATION, "glBitmap(PBO is mapped)");
+ return NULL;
+ }
+
+ return ADD_POINTERS(buf, bitmap);
+}
+
+static bool test_bit( const GLubyte *src, GLuint bit )
+{
+ return (src[bit/8] & (1<<(bit % 8))) ? 1 : 0;
+}
+
+static void set_bit( GLubyte *dest, GLuint bit )
+{
+ dest[bit/8] |= 1 << (bit % 8);
+}
+
+/* Extract a rectangle's worth of data from the bitmap. Called
+ * per chunk of HW-sized bitmap.
+ */
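+/* A note on the bit addressing, as far as the code shows: with the GL
+ * default unpack->LsbFirst == GL_FALSE, mask == 7, so the XOR makes
+ * test_bit() read each source byte MSB-first; set_bit(dest, bit ^ 7)
+ * likewise stores MSB-first, the layout the immediate color-expand blit
+ * below consumes.
+ */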
+static GLuint get_bitmap_rect(GLsizei width, GLsizei height,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLubyte *bitmap,
+ GLuint x, GLuint y,
+ GLuint w, GLuint h,
+ GLubyte *dest,
+ GLuint row_align,
+ bool invert)
+{
+ GLuint src_offset = (x + unpack->SkipPixels) & 0x7;
+ GLuint mask = unpack->LsbFirst ? 0 : 7;
+ GLuint bit = 0;
+ GLint row, col;
+ GLint first, last;
+ GLint incr;
+ GLuint count = 0;
+
+ DBG("%s %d,%d %dx%d bitmap %dx%d skip %d src_offset %d mask %d\n",
+ __FUNCTION__, x,y,w,h,width,height,unpack->SkipPixels, src_offset, mask);
+
+ if (invert) {
+ first = h-1;
+ last = 0;
+ incr = -1;
+ }
+ else {
+ first = 0;
+ last = h-1;
+ incr = 1;
+ }
+
+ /* Require that dest be pre-zero'd.
+ */
+ for (row = first; row != (last+incr); row += incr) {
+ const GLubyte *rowsrc = _mesa_image_address2d(unpack, bitmap,
+ width, height,
+ GL_COLOR_INDEX, GL_BITMAP,
+ y + row, x);
+
+ for (col = 0; col < w; col++, bit++) {
+ if (test_bit(rowsrc, (col + src_offset) ^ mask)) {
+ set_bit(dest, bit ^ 7);
+ count++;
+ }
+ }
+
+ if (row_align)
+ bit = ALIGN(bit, row_align);
+ }
+
+ return count;
+}
+
+/**
+ * Returns the low Y value of the vertical range given, flipped according to
+ * whether the framebuffer is a window-system framebuffer or a user FBO.
+ */
+static INLINE int
+y_flip(struct gl_framebuffer *fb, int y, int height)
+{
+ if (_mesa_is_user_fbo(fb))
+ return y;
+ else
+ return fb->Height - y - height;
+}
+
+/*
+ * Render a bitmap.
+ */
+static bool
+do_blit_bitmap( struct gl_context *ctx,
+ GLint dstx, GLint dsty,
+ GLsizei width, GLsizei height,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLubyte *bitmap )
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_region *dst;
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
+ GLfloat tmpColor[4];
+ GLubyte ubcolor[4];
+ GLuint color;
+ GLsizei bitmap_width = width;
+ GLsizei bitmap_height = height;
+ GLint px, py;
+ GLuint stipple[32];
+ GLint orig_dstx = dstx;
+ GLint orig_dsty = dsty;
+
+ /* Update draw buffer bounds */
+ _mesa_update_state(ctx);
+
+ if (ctx->Depth.Test) {
+ /* The blit path produces incorrect results when depth testing is on.
+ * It seems the blit Z coord is always 1.0 (the far plane) so fragments
+ * will likely be obscured by other, closer geometry.
+ */
+ return false;
+ }
+
+ intel_prepare_render(intel);
+ dst = intel_drawbuf_region(intel);
+
+ if (!dst)
+ return false;
+
+ if (_mesa_is_bufferobj(unpack->BufferObj)) {
+ bitmap = map_pbo(ctx, width, height, unpack, bitmap);
+ if (bitmap == NULL)
+ return true; /* even though this is an error, we're done */
+ }
+
+ COPY_4V(tmpColor, ctx->Current.RasterColor);
+
+ if (_mesa_need_secondary_color(ctx)) {
+ ADD_3V(tmpColor, tmpColor, ctx->Current.RasterSecondaryColor);
+ }
+
+ UNCLAMPED_FLOAT_TO_UBYTE(ubcolor[0], tmpColor[0]);
+ UNCLAMPED_FLOAT_TO_UBYTE(ubcolor[1], tmpColor[1]);
+ UNCLAMPED_FLOAT_TO_UBYTE(ubcolor[2], tmpColor[2]);
+ UNCLAMPED_FLOAT_TO_UBYTE(ubcolor[3], tmpColor[3]);
+
+ if (dst->cpp == 2)
+ color = PACK_COLOR_565(ubcolor[0], ubcolor[1], ubcolor[2]);
+ else
+ color = PACK_COLOR_8888(ubcolor[3], ubcolor[0], ubcolor[1], ubcolor[2]);
+
+ if (!intel_check_blit_fragment_ops(ctx, tmpColor[3] == 1.0F))
+ return false;
+
+ /* Clip to buffer bounds and scissor. */
+ if (!_mesa_clip_to_region(fb->_Xmin, fb->_Ymin,
+ fb->_Xmax, fb->_Ymax,
+ &dstx, &dsty, &width, &height))
+ goto out;
+
+ dsty = y_flip(fb, dsty, height);
+
+#define DY 32
+#define DX 32
+
+ /* Chop it all into chunks that can be digested by hardware: */
+ for (py = 0; py < height; py += DY) {
+ for (px = 0; px < width; px += DX) {
+ int h = MIN2(DY, height - py);
+ int w = MIN2(DX, width - px);
+ GLuint sz = ALIGN(ALIGN(w,8) * h, 64)/8;
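+ /* Worked example: a full 32x32 chunk needs ALIGN(32,8)*32 = 1024 bits,
+ * and ALIGN(1024, 64)/8 = 128 bytes -- exactly sizeof(stipple). */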
+ GLenum logic_op = ctx->Color.ColorLogicOpEnabled ?
+ ctx->Color.LogicOp : GL_COPY;
+
+ assert(sz <= sizeof(stipple));
+ memset(stipple, 0, sz);
+
+ /* May need to adjust this when padding has been introduced in
+ * sz above:
+ *
+ * Have to translate destination coordinates back into source
+ * coordinates.
+ */
+ if (get_bitmap_rect(bitmap_width, bitmap_height, unpack,
+ bitmap,
+ -orig_dstx + (dstx + px),
+ -orig_dsty + y_flip(fb, dsty + py, h),
+ w, h,
+ (GLubyte *)stipple,
+ 8,
+ _mesa_is_winsys_fbo(fb)) == 0)
+ continue;
+
+ if (!intelEmitImmediateColorExpandBlit(intel,
+ dst->cpp,
+ (GLubyte *)stipple,
+ sz,
+ color,
+ dst->pitch,
+ dst->bo,
+ 0,
+ dst->tiling,
+ dstx + px,
+ dsty + py,
+ w, h,
+ logic_op)) {
+ return false;
+ }
+ }
+ }
+out:
+
+ if (unlikely(INTEL_DEBUG & DEBUG_SYNC))
+ intel_batchbuffer_flush(intel);
+
+ if (_mesa_is_bufferobj(unpack->BufferObj)) {
+ /* done with PBO so unmap it now */
+ ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj);
+ }
+
+ intel_check_front_buffer_rendering(intel);
+
+ return true;
+}
+
+
+/* There are a large number of possible ways to implement bitmap on
+ * this hardware, most of them have some sort of drawback. Here are a
+ * few that spring to mind:
+ *
+ * Blit:
+ * - XY_MONO_SRC_BLT_CMD
+ * - use XY_SETUP_CLIP_BLT for cliprect clipping.
+ * - XY_TEXT_BLT
+ * - XY_TEXT_IMMEDIATE_BLT
+ * - blit per cliprect, subject to maximum immediate data size.
+ * - XY_COLOR_BLT
+ * - per pixel or run of pixels
+ * - XY_PIXEL_BLT
+ * - good for sparse bitmaps
+ *
+ * 3D engine:
+ * - Point per pixel
+ * - Translate bitmap to an alpha texture and render as a quad
+ * - Chop bitmap up into 32x32 squares and render w/polygon stipple.
+ */
+void
+intelBitmap(struct gl_context * ctx,
+ GLint x, GLint y,
+ GLsizei width, GLsizei height,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLubyte * pixels)
+{
+ if (!_mesa_check_conditional_render(ctx))
+ return;
+
+ if (do_blit_bitmap(ctx, x, y, width, height,
+ unpack, pixels))
+ return;
+
+ _mesa_meta_Bitmap(ctx, x, y, width, height, unpack, pixels);
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_pixel_copy.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_pixel_copy.c
index ee433605904..682a52d97d2 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_pixel_copy.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_pixel_copy.c
@@ -1 +1,230 @@
-../intel/intel_pixel_copy.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/glheader.h"
+#include "main/image.h"
+#include "main/state.h"
+#include "main/mtypes.h"
+#include "main/condrender.h"
+#include "main/fbobject.h"
+#include "drivers/common/meta.h"
+
+#include "intel_context.h"
+#include "intel_buffers.h"
+#include "intel_mipmap_tree.h"
+#include "intel_regions.h"
+#include "intel_pixel.h"
+#include "intel_fbo.h"
+
+#define FILE_DEBUG_FLAG DEBUG_PIXEL
+
+/**
+ * Check if any fragment operations are in effect which might affect
+ * glCopyPixels. Differs from intel_check_blit_fragment_ops in that
+ * we allow Scissor.
+ */
+static bool
+intel_check_copypixel_blit_fragment_ops(struct gl_context * ctx)
+{
+ if (ctx->NewState)
+ _mesa_update_state(ctx);
+
+ /* Could do logicop with the blitter:
+ */
+ return !(ctx->_ImageTransferState ||
+ ctx->Color.AlphaEnabled ||
+ ctx->Depth.Test ||
+ ctx->Fog.Enabled ||
+ ctx->Stencil._Enabled ||
+ !ctx->Color.ColorMask[0][0] ||
+ !ctx->Color.ColorMask[0][1] ||
+ !ctx->Color.ColorMask[0][2] ||
+ !ctx->Color.ColorMask[0][3] ||
+ ctx->Texture._EnabledUnits ||
+ ctx->FragmentProgram._Enabled ||
+ ctx->Color.BlendEnabled);
+}
+
+
+/**
+ * CopyPixels with the blitter. Don't support zooming, pixel transfer, etc.
+ */
+static bool
+do_blit_copypixels(struct gl_context * ctx,
+ GLint srcx, GLint srcy,
+ GLsizei width, GLsizei height,
+ GLint dstx, GLint dsty, GLenum type)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
+ struct gl_framebuffer *read_fb = ctx->ReadBuffer;
+ GLint orig_dstx;
+ GLint orig_dsty;
+ GLint orig_srcx;
+ GLint orig_srcy;
+ bool flip = false;
+ struct intel_renderbuffer *draw_irb = NULL;
+ struct intel_renderbuffer *read_irb = NULL;
+ gl_format read_format, draw_format;
+
+ /* Update draw buffer bounds */
+ _mesa_update_state(ctx);
+
+ switch (type) {
+ case GL_COLOR:
+ if (fb->_NumColorDrawBuffers != 1) {
+ fallback_debug("glCopyPixels() fallback: MRT\n");
+ return false;
+ }
+
+ draw_irb = intel_renderbuffer(fb->_ColorDrawBuffers[0]);
+ read_irb = intel_renderbuffer(read_fb->_ColorReadBuffer);
+ break;
+ case GL_DEPTH_STENCIL_EXT:
+ draw_irb = intel_renderbuffer(fb->Attachment[BUFFER_DEPTH].Renderbuffer);
+ read_irb =
+ intel_renderbuffer(read_fb->Attachment[BUFFER_DEPTH].Renderbuffer);
+ break;
+ case GL_DEPTH:
+ fallback_debug("glCopyPixels() fallback: GL_DEPTH\n");
+ return false;
+ case GL_STENCIL:
+ fallback_debug("glCopyPixels() fallback: GL_STENCIL\n");
+ return false;
+ default:
+ fallback_debug("glCopyPixels(): Unknown type\n");
+ return false;
+ }
+
+ if (!draw_irb) {
+ fallback_debug("glCopyPixels() fallback: missing draw buffer\n");
+ return false;
+ }
+
+ if (!read_irb) {
+ fallback_debug("glCopyPixels() fallback: missing read buffer\n");
+ return false;
+ }
+
+ read_format = intel_rb_format(read_irb);
+ draw_format = intel_rb_format(draw_irb);
+
+ if (draw_format != read_format &&
+ !(draw_format == MESA_FORMAT_XRGB8888 &&
+ read_format == MESA_FORMAT_ARGB8888)) {
+ fallback_debug("glCopyPixels() fallback: mismatched formats (%s -> %s\n",
+ _mesa_get_format_name(read_format),
+ _mesa_get_format_name(draw_format));
+ return false;
+ }
+
+ /* Copypixels can be more than a straight copy. Ensure all the
+ * extra operations are disabled:
+ */
+ if (!intel_check_copypixel_blit_fragment_ops(ctx) ||
+ ctx->Pixel.ZoomX != 1.0F || ctx->Pixel.ZoomY != 1.0F)
+ return false;
+
+ intel_prepare_render(intel);
+
+ intel_flush(&intel->ctx);
+
+ /* Clip to destination buffer. */
+ orig_dstx = dstx;
+ orig_dsty = dsty;
+ if (!_mesa_clip_to_region(fb->_Xmin, fb->_Ymin,
+ fb->_Xmax, fb->_Ymax,
+ &dstx, &dsty, &width, &height))
+ goto out;
+ /* Adjust src coords for our post-clipped destination origin */
+ srcx += dstx - orig_dstx;
+ srcy += dsty - orig_dsty;
+
+ /* Clip to source buffer. */
+ orig_srcx = srcx;
+ orig_srcy = srcy;
+ if (!_mesa_clip_to_region(0, 0,
+ read_fb->Width, read_fb->Height,
+ &srcx, &srcy, &width, &height))
+ goto out;
+ /* Adjust dst coords for our post-clipped source origin */
+ dstx += srcx - orig_srcx;
+ dsty += srcy - orig_srcy;
+
+ /* Flip dest Y if it's a window system framebuffer. */
+ if (_mesa_is_winsys_fbo(fb)) {
+ /* copypixels to a window system framebuffer */
+ dsty = fb->Height - dsty - height;
+ flip = !flip;
+ }
+
+ /* Flip source Y if it's a window system framebuffer. */
+ if (_mesa_is_winsys_fbo(read_fb)) {
+ srcy = read_fb->Height - srcy - height;
+ flip = !flip;
+ }
+
+ srcx += read_irb->draw_x;
+ srcy += read_irb->draw_y;
+ dstx += draw_irb->draw_x;
+ dsty += draw_irb->draw_y;
+
+ if (!intel_region_copy(intel,
+ draw_irb->mt->region, 0, dstx, dsty,
+ read_irb->mt->region, 0, srcx, srcy,
+ width, height, flip,
+ ctx->Color.ColorLogicOpEnabled ?
+ ctx->Color.LogicOp : GL_COPY)) {
+ DBG("%s: blit failure\n", __FUNCTION__);
+ return false;
+ }
+
+out:
+ intel_check_front_buffer_rendering(intel);
+
+ DBG("%s: success\n", __FUNCTION__);
+ return true;
+}
+
+
+void
+intelCopyPixels(struct gl_context * ctx,
+ GLint srcx, GLint srcy,
+ GLsizei width, GLsizei height,
+ GLint destx, GLint desty, GLenum type)
+{
+ DBG("%s\n", __FUNCTION__);
+
+ if (!_mesa_check_conditional_render(ctx))
+ return;
+
+ if (do_blit_copypixels(ctx, srcx, srcy, width, height, destx, desty, type))
+ return;
+
+ /* this will use swrast if needed */
+ _mesa_meta_CopyPixels(ctx, srcx, srcy, width, height, destx, desty, type);
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_pixel_draw.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_pixel_draw.c
index 8431a24edfc..2ec7ed8e269 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_pixel_draw.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_pixel_draw.c
@@ -1 +1,58 @@
-../intel/intel_pixel_draw.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/glheader.h"
+#include "main/enums.h"
+#include "main/image.h"
+#include "main/mtypes.h"
+#include "main/teximage.h"
+#include "main/texobj.h"
+#include "main/texstate.h"
+#include "swrast/swrast.h"
+#include "drivers/common/meta.h"
+
+#include "intel_context.h"
+#include "intel_pixel.h"
+
+void
+intelDrawPixels(struct gl_context * ctx,
+ GLint x, GLint y,
+ GLsizei width, GLsizei height,
+ GLenum format,
+ GLenum type,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLvoid * pixels)
+{
+ if (format == GL_STENCIL_INDEX) {
+ _swrast_DrawPixels(ctx, x, y, width, height, format, type,
+ unpack, pixels);
+ return;
+ }
+
+ _mesa_meta_DrawPixels(ctx, x, y, width, height, format, type,
+ unpack, pixels);
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_pixel_read.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_pixel_read.c
index cc4589f4d42..ab4e581c400 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_pixel_read.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_pixel_read.c
@@ -1 +1,205 @@
-../intel/intel_pixel_read.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/glheader.h"
+#include "main/enums.h"
+#include "main/mtypes.h"
+#include "main/macros.h"
+#include "main/fbobject.h"
+#include "main/image.h"
+#include "main/bufferobj.h"
+#include "main/readpix.h"
+#include "main/state.h"
+
+#include "intel_screen.h"
+#include "intel_context.h"
+#include "intel_blit.h"
+#include "intel_buffers.h"
+#include "intel_regions.h"
+#include "intel_pixel.h"
+#include "intel_buffer_objects.h"
+
+#define FILE_DEBUG_FLAG DEBUG_PIXEL
+
+/* For many applications, the new ability to pull the source buffers
+ * back out of the GTT and then do the packing/conversion operations
+ * in software will be as much of an improvement as trying to get the
+ * blitter and/or texture engine to do the work.
+ *
+ * This step is gated on private backbuffers.
+ *
+ * Obviously the frontbuffer can't be pulled back, so that is either
+ * an argument for blit/texture readpixels, or for blitting to a
+ * temporary and then pulling that back.
+ *
+ * When the destination is a pbo, however, it's not clear if it is
+ * ever going to be pulled to main memory (though the access param
+ * will be a good hint). So it sounds like we do want to be able to
+ * choose between blit/texture implementation on the gpu and pullback
+ * and cpu-based copying.
+ *
+ * Unless you can magically turn client memory into a PBO for the
+ * duration of this call, there will be a cpu-based copying step in
+ * any case.
+ */
+
+static bool
+do_blit_readpixels(struct gl_context * ctx,
+ GLint x, GLint y, GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_region *src = intel_readbuf_region(intel);
+ struct intel_buffer_object *dst = intel_buffer_object(pack->BufferObj);
+ GLuint dst_offset;
+ GLuint rowLength;
+ drm_intel_bo *dst_buffer;
+ bool all;
+ GLint dst_x, dst_y;
+ GLuint dirty;
+
+ DBG("%s\n", __FUNCTION__);
+
+ if (!src)
+ return false;
+
+ if (!_mesa_is_bufferobj(pack->BufferObj)) {
+ /* PBO only for now:
+ */
+ DBG("%s - not PBO\n", __FUNCTION__);
+ return false;
+ }
+
+
+ if (ctx->_ImageTransferState ||
+ !intel_check_blit_format(src, format, type)) {
+ DBG("%s - bad format for blit\n", __FUNCTION__);
+ return false;
+ }
+
+ if (pack->Alignment != 1 || pack->SwapBytes || pack->LsbFirst) {
+ DBG("%s: bad packing params\n", __FUNCTION__);
+ return false;
+ }
+
+ if (pack->RowLength > 0)
+ rowLength = pack->RowLength;
+ else
+ rowLength = width;
+
+ if (pack->Invert) {
+ DBG("%s: MESA_PACK_INVERT not done yet\n", __FUNCTION__);
+ return false;
+ }
+ else {
+ if (_mesa_is_winsys_fbo(ctx->ReadBuffer))
+ rowLength = -rowLength;
+ }
+
+ dst_offset = (GLintptr)pixels;
+ dst_offset += _mesa_image_offset(2, pack, width, height,
+ format, type, 0, 0, 0);
+
+ if (!_mesa_clip_copytexsubimage(ctx,
+ &dst_x, &dst_y,
+ &x, &y,
+ &width, &height)) {
+ return true;
+ }
+
+ dirty = intel->front_buffer_dirty;
+ intel_prepare_render(intel);
+ intel->front_buffer_dirty = dirty;
+
+ all = (width * height * src->cpp == dst->Base.Size &&
+ x == 0 && dst_offset == 0);
+
+ dst_x = 0;
+ dst_y = 0;
+
+ dst_buffer = intel_bufferobj_buffer(intel, dst,
+ all ? INTEL_WRITE_FULL :
+ INTEL_WRITE_PART);
+
+ if (_mesa_is_winsys_fbo(ctx->ReadBuffer))
+ y = ctx->ReadBuffer->Height - (y + height);
+
+ if (!intelEmitCopyBlit(intel,
+ src->cpp,
+ src->pitch, src->bo, 0, src->tiling,
+ rowLength, dst_buffer, dst_offset, false,
+ x, y,
+ dst_x, dst_y,
+ width, height,
+ GL_COPY)) {
+ return false;
+ }
+
+ DBG("%s - DONE\n", __FUNCTION__);
+
+ return true;
+}
+
+void
+intelReadPixels(struct gl_context * ctx,
+ GLint x, GLint y, GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
+{
+ struct intel_context *intel = intel_context(ctx);
+ bool dirty;
+
+ intel_flush_rendering_to_batch(ctx);
+
+ DBG("%s\n", __FUNCTION__);
+
+ if (do_blit_readpixels
+ (ctx, x, y, width, height, format, type, pack, pixels))
+ return;
+
+ /* glReadPixels() won't dirty the front buffer, so reset the dirty
+ * flag after calling intel_prepare_render(). */
+ dirty = intel->front_buffer_dirty;
+ intel_prepare_render(intel);
+ intel->front_buffer_dirty = dirty;
+
+ fallback_debug("%s: fallback to swrast\n", __FUNCTION__);
+
+ /* Update Mesa state before calling _mesa_readpixels().
+ * XXX this may not be needed since ReadPixels no longer uses the
+ * span code.
+ */
+
+ if (ctx->NewState)
+ _mesa_update_state(ctx);
+
+ _mesa_readpixels(ctx, x, y, width, height, format, type, pack, pixels);
+
+ /* There's an intel_prepare_render() call in intelSpanRenderStart(). */
+ intel->front_buffer_dirty = dirty;
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_regions.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_regions.c
index 89b2f15c10f..7cb008c2f71 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_regions.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_regions.c
@@ -1 +1,473 @@
-../intel/intel_regions.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Provide additional functionality on top of bufmgr buffers:
+ * - 2d semantics and blit operations
+ * - refcounting of buffers for multiple images in a buffer.
+ * - refcounting of buffer mappings.
+ * - some logic for moving the buffers to the best memory pools for
+ * given operations.
+ *
+ * Most of this is to make it easier to implement the fixed-layout
+ * mipmap tree required by intel hardware in the face of GL's
+ * programming interface where each image can be specified in random
+ * order and it isn't clear what layout the tree should have until the
+ * last moment.
+ */
+
+#include <sys/ioctl.h>
+#include <errno.h>
+
+#include "main/hash.h"
+#include "intel_context.h"
+#include "intel_regions.h"
+#include "intel_blit.h"
+#include "intel_buffer_objects.h"
+#include "intel_bufmgr.h"
+#include "intel_batchbuffer.h"
+
+#define FILE_DEBUG_FLAG DEBUG_REGION
+
+/* This should be set to the maximum backtrace size desired.
+ * Set it to 0 to disable backtrace debugging.
+ */
+#define DEBUG_BACKTRACE_SIZE 0
+
+#if DEBUG_BACKTRACE_SIZE == 0
+/* Use the standard debug output */
+#define _DBG(...) DBG(__VA_ARGS__)
+#else
+/* Use backtracing debug output */
+#define _DBG(...) {debug_backtrace(); DBG(__VA_ARGS__);}
+
+/* Backtracing debug support */
+#include <execinfo.h>
+
+static void
+debug_backtrace(void)
+{
+ void *trace[DEBUG_BACKTRACE_SIZE];
+ char **strings = NULL;
+ int traceSize;
+ register int i;
+
+ traceSize = backtrace(trace, DEBUG_BACKTRACE_SIZE);
+ strings = backtrace_symbols(trace, traceSize);
+ if (strings == NULL) {
+ DBG("no backtrace:");
+ return;
+ }
+
+ /* Spit out all the strings with a colon separator. Ignore
+ * the first, since we don't really care about the call
+ * to debug_backtrace() itself. Skip until the final "/" in
+ * the trace to avoid really long lines.
+ */
+ for (i = 1; i < traceSize; i++) {
+ char *p = strings[i], *slash = strings[i];
+ while (*p) {
+ if (*p++ == '/') {
+ slash = p;
+ }
+ }
+
+ DBG("%s:", slash);
+ }
+
+ /* Free up the memory, and we're done */
+ free(strings);
+}
+
+#endif
+
+
+
+/* XXX: Thread safety?
+ */
+void *
+intel_region_map(struct intel_context *intel, struct intel_region *region,
+ GLbitfield mode)
+{
+ /* We have the region->map_refcount controlling mapping of the BO because
+ * in software fallbacks we may end up mapping the same buffer multiple
+ * times on Mesa's behalf, so we refcount our mappings to make sure that
+ * the pointer stays valid until the end of the unmap chain. However, we
+ * must not emit any batchbuffers between the start of mapping and the end
+ * of unmapping, or further use of the map will be incoherent with the GPU
+ * rendering done by that batchbuffer. Hence we assert in
+ * intel_batchbuffer_flush() that that doesn't happen, which means that the
+ * flush is only needed on first map of the buffer.
+ */
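+ /* A minimal sketch of the resulting contract (hypothetical caller, not
+ * code from this file):
+ *
+ * void *p = intel_region_map(intel, region, GL_MAP_READ_BIT); // flushes
+ * void *q = intel_region_map(intel, region, GL_MAP_READ_BIT); // refcount++
+ * ... no batchbuffers may be emitted in between ...
+ * intel_region_unmap(intel, region);
+ * intel_region_unmap(intel, region); // mapping actually released here
+ */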
+
+ if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
+ if (drm_intel_bo_busy(region->bo)) {
+ perf_debug("Mapping a busy BO, causing a stall on the GPU.\n");
+ }
+ }
+
+ _DBG("%s %p\n", __FUNCTION__, region);
+ if (!region->map_refcount) {
+ intel_flush(&intel->ctx);
+
+ if (region->tiling != I915_TILING_NONE)
+ drm_intel_gem_bo_map_gtt(region->bo);
+ else
+ drm_intel_bo_map(region->bo, true);
+
+ region->map = region->bo->virtual;
+ }
+ if (region->map) {
+ intel->num_mapped_regions++;
+ region->map_refcount++;
+ }
+
+ return region->map;
+}
+
+void
+intel_region_unmap(struct intel_context *intel, struct intel_region *region)
+{
+ _DBG("%s %p\n", __FUNCTION__, region);
+ if (!--region->map_refcount) {
+ if (region->tiling != I915_TILING_NONE)
+ drm_intel_gem_bo_unmap_gtt(region->bo);
+ else
+ drm_intel_bo_unmap(region->bo);
+
+ region->map = NULL;
+ --intel->num_mapped_regions;
+ assert(intel->num_mapped_regions >= 0);
+ }
+}
+
+static struct intel_region *
+intel_region_alloc_internal(struct intel_screen *screen,
+ GLuint cpp,
+ GLuint width, GLuint height, GLuint pitch,
+ uint32_t tiling, drm_intel_bo *buffer)
+{
+ struct intel_region *region;
+
+ region = calloc(sizeof(*region), 1);
+ if (region == NULL)
+ return region;
+
+ region->cpp = cpp;
+ region->width = width;
+ region->height = height;
+ region->pitch = pitch;
+ region->refcount = 1;
+ region->bo = buffer;
+ region->tiling = tiling;
+ region->screen = screen;
+
+ _DBG("%s <-- %p\n", __FUNCTION__, region);
+ return region;
+}
+
+struct intel_region *
+intel_region_alloc(struct intel_screen *screen,
+ uint32_t tiling,
+ GLuint cpp, GLuint width, GLuint height,
+ bool expect_accelerated_upload)
+{
+ drm_intel_bo *buffer;
+ unsigned long flags = 0;
+ unsigned long aligned_pitch;
+ struct intel_region *region;
+
+ if (expect_accelerated_upload)
+ flags |= BO_ALLOC_FOR_RENDER;
+
+ buffer = drm_intel_bo_alloc_tiled(screen->bufmgr, "region",
+ width, height, cpp,
+ &tiling, &aligned_pitch, flags);
+ if (buffer == NULL)
+ return NULL;
+
+ region = intel_region_alloc_internal(screen, cpp, width, height,
+ aligned_pitch / cpp, tiling, buffer);
+ if (region == NULL) {
+ drm_intel_bo_unreference(buffer);
+ return NULL;
+ }
+
+ return region;
+}
+
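+/* intel_region_flink() publishes a global GEM "flink" name for the
+ * region's BO so it can be shared across processes (e.g. for DRI2
+ * buffer sharing). The name is created at most once, cached in
+ * region->name, and the region is recorded in the screen's
+ * named_regions table so intel_region_alloc_for_handle() can return
+ * the same region for that name later.
+ */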
+bool
+intel_region_flink(struct intel_region *region, uint32_t *name)
+{
+ if (region->name == 0) {
+ if (drm_intel_bo_flink(region->bo, &region->name))
+ return false;
+
+ _mesa_HashInsert(region->screen->named_regions,
+ region->name, region);
+ }
+
+ *name = region->name;
+
+ return true;
+}
+
+struct intel_region *
+intel_region_alloc_for_handle(struct intel_screen *screen,
+ GLuint cpp,
+ GLuint width, GLuint height, GLuint pitch,
+ GLuint handle, const char *name)
+{
+ struct intel_region *region, *dummy;
+ drm_intel_bo *buffer;
+ int ret;
+ uint32_t bit_6_swizzle, tiling;
+
+ region = _mesa_HashLookup(screen->named_regions, handle);
+ if (region != NULL) {
+ dummy = NULL;
+ if (region->width != width || region->height != height ||
+ region->cpp != cpp || region->pitch != pitch) {
+ fprintf(stderr,
+ "Region for name %d already exists but is not compatible\n",
+ handle);
+ return NULL;
+ }
+ intel_region_reference(&dummy, region);
+ return dummy;
+ }
+
+ buffer = intel_bo_gem_create_from_name(screen->bufmgr, name, handle);
+ if (buffer == NULL)
+ return NULL;
+ ret = drm_intel_bo_get_tiling(buffer, &tiling, &bit_6_swizzle);
+ if (ret != 0) {
+ fprintf(stderr, "Couldn't get tiling of buffer %d (%s): %s\n",
+ handle, name, strerror(-ret));
+ drm_intel_bo_unreference(buffer);
+ return NULL;
+ }
+
+ region = intel_region_alloc_internal(screen, cpp,
+ width, height, pitch, tiling, buffer);
+ if (region == NULL) {
+ drm_intel_bo_unreference(buffer);
+ return NULL;
+ }
+
+ region->name = handle;
+ _mesa_HashInsert(screen->named_regions, handle, region);
+
+ return region;
+}
+
+void
+intel_region_reference(struct intel_region **dst, struct intel_region *src)
+{
+ _DBG("%s: %p(%d) -> %p(%d)\n", __FUNCTION__,
+ *dst, *dst ? (*dst)->refcount : 0, src, src ? src->refcount : 0);
+
+ if (src != *dst) {
+ if (*dst)
+ intel_region_release(dst);
+
+ if (src)
+ src->refcount++;
+ *dst = src;
+ }
+}
+
+void
+intel_region_release(struct intel_region **region_handle)
+{
+ struct intel_region *region = *region_handle;
+
+ if (region == NULL) {
+ _DBG("%s NULL\n", __FUNCTION__);
+ return;
+ }
+
+ _DBG("%s %p %d\n", __FUNCTION__, region, region->refcount - 1);
+
+ ASSERT(region->refcount > 0);
+ region->refcount--;
+
+ if (region->refcount == 0) {
+ assert(region->map_refcount == 0);
+
+ drm_intel_bo_unreference(region->bo);
+
+ if (region->name > 0)
+ _mesa_HashRemove(region->screen->named_regions, region->name);
+
+ free(region);
+ }
+ *region_handle = NULL;
+}
+
+/*
+ * XXX Move this into core Mesa?
+ */
+void
+_mesa_copy_rect(GLubyte * dst,
+ GLuint cpp,
+ GLuint dst_pitch,
+ GLuint dst_x,
+ GLuint dst_y,
+ GLuint width,
+ GLuint height,
+ const GLubyte * src,
+ GLuint src_pitch, GLuint src_x, GLuint src_y)
+{
+ GLuint i;
+
+ dst_pitch *= cpp;
+ src_pitch *= cpp;
+ dst += dst_x * cpp;
+ src += src_x * cpp;
+ dst += dst_y * dst_pitch;
+ src += src_y * src_pitch;
+ width *= cpp;
+
+ if (width == dst_pitch && width == src_pitch)
+ memcpy(dst, src, height * width);
+ else {
+ for (i = 0; i < height; i++) {
+ memcpy(dst, src, width);
+ dst += dst_pitch;
+ src += src_pitch;
+ }
+ }
+}
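+
+/* Example with hypothetical values: copy a 16x16-pixel block of a
+ * 4-byte-per-pixel surface from (0,0) in src to (8,8) in dst. The
+ * pitches are passed in pixels; the function scales them by cpp:
+ *
+ * _mesa_copy_rect(dst, 4, dst_pitch, 8, 8, 16, 16,
+ * src, src_pitch, 0, 0);
+ */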
+
+/* Copy rectangular sub-regions. Needs better logic about when to
+ * push buffers into AGP; currently it does so whenever possible.
+ */
+bool
+intel_region_copy(struct intel_context *intel,
+ struct intel_region *dst,
+ GLuint dst_offset,
+ GLuint dstx, GLuint dsty,
+ struct intel_region *src,
+ GLuint src_offset,
+ GLuint srcx, GLuint srcy, GLuint width, GLuint height,
+ bool flip,
+ GLenum logicop)
+{
+ uint32_t src_pitch = src->pitch;
+
+ _DBG("%s\n", __FUNCTION__);
+
+ if (intel == NULL)
+ return false;
+
+ assert(src->cpp == dst->cpp);
+
+ if (flip)
+ src_pitch = -src_pitch;
+
+ return intelEmitCopyBlit(intel,
+ dst->cpp,
+ src_pitch, src->bo, src_offset, src->tiling,
+ dst->pitch, dst->bo, dst_offset, dst->tiling,
+ srcx, srcy, dstx, dsty, width, height,
+ logicop);
+}
+
+/**
+ * This function computes masks that may be used to select the bits of the X
+ * and Y coordinates that indicate the offset within a tile. If the region is
+ * untiled, the masks are set to 0.
+ */
+void
+intel_region_get_tile_masks(struct intel_region *region,
+ uint32_t *mask_x, uint32_t *mask_y,
+ bool map_stencil_as_y_tiled)
+{
+ int cpp = region->cpp;
+ uint32_t tiling = region->tiling;
+
+ if (map_stencil_as_y_tiled)
+ tiling = I915_TILING_Y;
+
+ switch (tiling) {
+ default:
+ assert(false);
+ case I915_TILING_NONE:
+ *mask_x = *mask_y = 0;
+ break;
+ case I915_TILING_X:
+ *mask_x = 512 / cpp - 1;
+ *mask_y = 7;
+ break;
+ case I915_TILING_Y:
+ *mask_x = 128 / cpp - 1;
+ *mask_y = 31;
+ break;
+ }
+}
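+
+/* Worked example (illustrative): for an X-tiled region with cpp=4,
+ * mask_x = 512/4 - 1 = 127 and mask_y = 7, matching the 512-byte by
+ * 8-row X tile: (x & mask_x, y & mask_y) locates the texel within a
+ * tile, while (x & ~mask_x, y & ~mask_y) stays tile-aligned.
+ */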
+
+/**
+ * Compute the offset (in bytes) from the start of the region to the given x
+ * and y coordinate. For tiled regions, caller must ensure that x and y are
+ * multiples of the tile size.
+ */
+uint32_t
+intel_region_get_aligned_offset(struct intel_region *region, uint32_t x,
+ uint32_t y, bool map_stencil_as_y_tiled)
+{
+ int cpp = region->cpp;
+ uint32_t pitch = region->pitch * cpp;
+ uint32_t tiling = region->tiling;
+
+ if (map_stencil_as_y_tiled) {
+ tiling = I915_TILING_Y;
+
+ /* When mapping a W-tiled stencil buffer as Y-tiled, each 64-high W-tile
+ * gets transformed into a 32-high Y-tile. Accordingly, the pitch of
+ * the resulting region is twice the pitch of the original region, since
+ * each row in the Y-tiled view corresponds to two rows in the actual
+ * W-tiled surface. So we need to correct the pitch before computing
+ * the offsets.
+ */
+ pitch *= 2;
+ }
+
+ switch (tiling) {
+ default:
+ assert(false);
+ case I915_TILING_NONE:
+ return y * pitch + x * cpp;
+ case I915_TILING_X:
+ assert((x % (512 / cpp)) == 0);
+ assert((y % 8) == 0);
+ return y * pitch + x / (512 / cpp) * 4096;
+ case I915_TILING_Y:
+ assert((x % (128 / cpp)) == 0);
+ assert((y % 32) == 0);
+ return y * pitch + x / (128 / cpp) * 4096;
+ }
+}
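+
+/* Worked example (illustrative): for an X-tiled region with cpp=4,
+ * a tile is 512/4 = 128 pixels wide, so (x=128, y=8) satisfies the
+ * alignment asserts and yields an offset of 8 * pitch + 1 * 4096 bytes.
+ */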
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_resolve_map.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_resolve_map.c
index 77e50fbaea4..04b5c942432 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_resolve_map.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_resolve_map.c
@@ -1 +1,111 @@
-../intel/intel_resolve_map.c \ No newline at end of file
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "intel_resolve_map.h"
+
+#include <assert.h>
+#include <stdlib.h>
+
+/**
+ * \brief Set that the miptree slice at (level, layer) needs a resolve.
+ *
+ * If a map element already exists with the given key, then the value is
+ * changed to the given value of \c need.
+ */
+void
+intel_resolve_map_set(struct intel_resolve_map *head,
+ uint32_t level,
+ uint32_t layer,
+ enum gen6_hiz_op need)
+{
+ struct intel_resolve_map **tail = &head->next;
+ struct intel_resolve_map *prev = head;
+
+ while (*tail) {
+ if ((*tail)->level == level && (*tail)->layer == layer) {
+ (*tail)->need = need;
+ return;
+ }
+ prev = *tail;
+ tail = &(*tail)->next;
+ }
+
+ *tail = malloc(sizeof(**tail));
+ if (*tail == NULL)
+ return;
+ (*tail)->prev = prev;
+ (*tail)->next = NULL;
+ (*tail)->level = level;
+ (*tail)->layer = layer;
+ (*tail)->need = need;
+}
+
+/**
+ * \brief Get an element from the map.
+ * \return null if element is not contained in map.
+ */
+struct intel_resolve_map*
+intel_resolve_map_get(struct intel_resolve_map *head,
+ uint32_t level,
+ uint32_t layer)
+{
+ struct intel_resolve_map *item = head->next;
+
+ while (item) {
+ if (item->level == level && item->layer == layer)
+ break;
+ else
+ item = item->next;
+ }
+
+ return item;
+}
+
+/**
+ * \brief Remove and free an element from the map.
+ */
+void
+intel_resolve_map_remove(struct intel_resolve_map *elem)
+{
+ if (elem->prev)
+ elem->prev->next = elem->next;
+ if (elem->next)
+ elem->next->prev = elem->prev;
+ free(elem);
+}
+
+/**
+ * \brief Remove and free all elements of the map.
+ */
+void
+intel_resolve_map_clear(struct intel_resolve_map *head)
+{
+ struct intel_resolve_map *next = head->next;
+ struct intel_resolve_map *trash;
+
+ while (next) {
+ trash = next;
+ next = next->next;
+ free(trash);
+ }
+
+ head->next = NULL;
+}
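+
+/* Illustrative usage sketch (level/layer values are hypothetical):
+ * mark a miptree slice as needing a HiZ resolve, look it up, then
+ * drop the whole map:
+ *
+ * struct intel_resolve_map head = { 0 };
+ * intel_resolve_map_set(&head, 2, 0, GEN6_HIZ_OP_HIZ_RESOLVE);
+ * struct intel_resolve_map *e = intel_resolve_map_get(&head, 2, 0);
+ * if (e)
+ * intel_resolve_map_remove(e);
+ * intel_resolve_map_clear(&head);
+ */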
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_screen.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_screen.c
index f2db48272b9..00a1b0122ec 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_screen.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_screen.c
@@ -1 +1,1204 @@
-../intel/intel_screen.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <errno.h>
+#include <time.h>
+#include "main/glheader.h"
+#include "main/context.h"
+#include "main/framebuffer.h"
+#include "main/renderbuffer.h"
+#include "main/hash.h"
+#include "main/fbobject.h"
+#include "main/mfeatures.h"
+#include "main/version.h"
+#include "swrast/s_renderbuffer.h"
+
+#include "utils.h"
+#include "xmlpool.h"
+
+PUBLIC const char __driConfigOptions[] =
+ DRI_CONF_BEGIN
+ DRI_CONF_SECTION_PERFORMANCE
+ DRI_CONF_VBLANK_MODE(DRI_CONF_VBLANK_ALWAYS_SYNC)
+ /* Options correspond to DRI_CONF_BO_REUSE_DISABLED,
+ * DRI_CONF_BO_REUSE_ALL
+ */
+ DRI_CONF_OPT_BEGIN_V(bo_reuse, enum, 1, "0:1")
+ DRI_CONF_DESC_BEGIN(en, "Buffer object reuse")
+ DRI_CONF_ENUM(0, "Disable buffer object reuse")
+ DRI_CONF_ENUM(1, "Enable reuse of all sizes of buffer objects")
+ DRI_CONF_DESC_END
+ DRI_CONF_OPT_END
+
+ DRI_CONF_OPT_BEGIN(texture_tiling, bool, true)
+ DRI_CONF_DESC(en, "Enable texture tiling")
+ DRI_CONF_OPT_END
+
+ DRI_CONF_OPT_BEGIN(hiz, bool, true)
+ DRI_CONF_DESC(en, "Enable Hierarchical Z on gen6+")
+ DRI_CONF_OPT_END
+
+ DRI_CONF_OPT_BEGIN(early_z, bool, false)
+ DRI_CONF_DESC(en, "Enable early Z in classic mode (unstable, 945-only).")
+ DRI_CONF_OPT_END
+
+ DRI_CONF_OPT_BEGIN(fragment_shader, bool, true)
+ DRI_CONF_DESC(en, "Enable limited ARB_fragment_shader support on 915/945.")
+ DRI_CONF_OPT_END
+
+ DRI_CONF_SECTION_END
+ DRI_CONF_SECTION_QUALITY
+ DRI_CONF_FORCE_S3TC_ENABLE(false)
+ DRI_CONF_ALLOW_LARGE_TEXTURES(2)
+ DRI_CONF_SECTION_END
+ DRI_CONF_SECTION_DEBUG
+ DRI_CONF_NO_RAST(false)
+ DRI_CONF_ALWAYS_FLUSH_BATCH(false)
+ DRI_CONF_ALWAYS_FLUSH_CACHE(false)
+ DRI_CONF_FORCE_GLSL_EXTENSIONS_WARN(false)
+ DRI_CONF_DISABLE_BLEND_FUNC_EXTENDED(false)
+
+ DRI_CONF_OPT_BEGIN(stub_occlusion_query, bool, false)
+ DRI_CONF_DESC(en, "Enable stub ARB_occlusion_query support on 915/945.")
+ DRI_CONF_OPT_END
+
+ DRI_CONF_OPT_BEGIN(shader_precompile, bool, true)
+ DRI_CONF_DESC(en, "Perform code generation at shader link time.")
+ DRI_CONF_OPT_END
+ DRI_CONF_SECTION_END
+DRI_CONF_END;
+
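+/* Keep in sync with the number of options declared in __driConfigOptions. */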
+const GLuint __driNConfigOptions = 15;
+
+#include "intel_batchbuffer.h"
+#include "intel_buffers.h"
+#include "intel_bufmgr.h"
+#include "intel_chipset.h"
+#include "intel_fbo.h"
+#include "intel_mipmap_tree.h"
+#include "intel_screen.h"
+#include "intel_tex.h"
+#include "intel_regions.h"
+
+#include "i915_drm.h"
+
+#ifdef USE_NEW_INTERFACE
+static PFNGLXCREATECONTEXTMODES create_context_modes = NULL;
+#endif /*USE_NEW_INTERFACE */
+
+/**
+ * For debugging purposes, this returns a time in seconds.
+ */
+double
+get_time(void)
+{
+ struct timespec tp;
+
+ clock_gettime(CLOCK_MONOTONIC, &tp);
+
+ return tp.tv_sec + tp.tv_nsec / 1000000000.0;
+}
+
+void
+aub_dump_bmp(struct gl_context *ctx)
+{
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
+
+ for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
+ struct intel_renderbuffer *irb =
+ intel_renderbuffer(fb->_ColorDrawBuffers[i]);
+
+ if (irb && irb->mt) {
+ enum aub_dump_bmp_format format;
+
+ switch (irb->Base.Base.Format) {
+ case MESA_FORMAT_ARGB8888:
+ case MESA_FORMAT_XRGB8888:
+ format = AUB_DUMP_BMP_FORMAT_ARGB_8888;
+ break;
+ default:
+ continue;
+ }
+
+ drm_intel_gem_bo_aub_dump_bmp(irb->mt->region->bo,
+ irb->draw_x,
+ irb->draw_y,
+ irb->Base.Base.Width,
+ irb->Base.Base.Height,
+ format,
+ irb->mt->region->pitch *
+ irb->mt->region->cpp,
+ 0);
+ }
+ }
+}
+
+static const __DRItexBufferExtension intelTexBufferExtension = {
+ { __DRI_TEX_BUFFER, __DRI_TEX_BUFFER_VERSION },
+ intelSetTexBuffer,
+ intelSetTexBuffer2,
+};
+
+static void
+intelDRI2Flush(__DRIdrawable *drawable)
+{
+ GET_CURRENT_CONTEXT(ctx);
+ struct intel_context *intel = intel_context(ctx);
+ if (intel == NULL)
+ return;
+
+ if (intel->gen < 4)
+ INTEL_FIREVERTICES(intel);
+
+ intel_downsample_for_dri2_flush(intel, drawable);
+ intel->need_throttle = true;
+
+ if (intel->batch.used)
+ intel_batchbuffer_flush(intel);
+
+ if (INTEL_DEBUG & DEBUG_AUB) {
+ aub_dump_bmp(ctx);
+ }
+}
+
+static const struct __DRI2flushExtensionRec intelFlushExtension = {
+ { __DRI2_FLUSH, __DRI2_FLUSH_VERSION },
+ intelDRI2Flush,
+ dri2InvalidateDrawable,
+};
+
+struct intel_image_format intel_image_formats[] = {
+ { __DRI_IMAGE_FOURCC_ARGB8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
+ { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } },
+
+ { __DRI_IMAGE_FOURCC_XRGB8888, __DRI_IMAGE_COMPONENTS_RGB, 1,
+ { { 0, 0, 0, __DRI_IMAGE_FORMAT_XRGB8888, 4 }, } },
+
+ { __DRI_IMAGE_FOURCC_YUV410, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
+ { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
+ { 1, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 },
+ { 2, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 } } },
+
+ { __DRI_IMAGE_FOURCC_YUV411, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
+ { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
+ { 1, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 },
+ { 2, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },
+
+ { __DRI_IMAGE_FOURCC_YUV420, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
+ { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
+ { 1, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 },
+ { 2, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 } } },
+
+ { __DRI_IMAGE_FOURCC_YUV422, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
+ { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
+ { 1, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 },
+ { 2, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },
+
+ { __DRI_IMAGE_FOURCC_YUV444, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
+ { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
+ { 1, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
+ { 2, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },
+
+ { __DRI_IMAGE_FOURCC_NV12, __DRI_IMAGE_COMPONENTS_Y_UV, 2,
+ { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
+ { 1, 1, 1, __DRI_IMAGE_FORMAT_GR88, 2 } } },
+
+ { __DRI_IMAGE_FOURCC_NV16, __DRI_IMAGE_COMPONENTS_Y_UV, 2,
+ { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
+ { 1, 1, 0, __DRI_IMAGE_FORMAT_GR88, 2 } } },
+
+ /* For YUYV buffers, we set up two overlapping DRI images and treat
+ * them as planar buffers in the compositors. Plane 0 is GR88 and
+ * samples YU or YV pairs and places Y into the R component, while
+ * plane 1 is ARGB and samples whole YUYV clusters, placing U into the
+ * G component and V into A. This lets the
+ * texture sampler interpolate the Y components correctly when
+ * sampling from plane 0, and interpolate U and V correctly when
+ * sampling from plane 1. */
+ { __DRI_IMAGE_FOURCC_YUYV, __DRI_IMAGE_COMPONENTS_Y_XUXV, 2,
+ { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88, 2 },
+ { 0, 1, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } }
+};
+
+static __DRIimage *
+intel_allocate_image(int dri_format, void *loaderPrivate)
+{
+ __DRIimage *image;
+
+ image = CALLOC(sizeof *image);
+ if (image == NULL)
+ return NULL;
+
+ image->dri_format = dri_format;
+ image->offset = 0;
+
+ switch (dri_format) {
+ case __DRI_IMAGE_FORMAT_RGB565:
+ image->format = MESA_FORMAT_RGB565;
+ break;
+ case __DRI_IMAGE_FORMAT_XRGB8888:
+ image->format = MESA_FORMAT_XRGB8888;
+ break;
+ case __DRI_IMAGE_FORMAT_ARGB8888:
+ image->format = MESA_FORMAT_ARGB8888;
+ break;
+ case __DRI_IMAGE_FORMAT_ABGR8888:
+ image->format = MESA_FORMAT_RGBA8888_REV;
+ break;
+ case __DRI_IMAGE_FORMAT_XBGR8888:
+ image->format = MESA_FORMAT_RGBX8888_REV;
+ break;
+ case __DRI_IMAGE_FORMAT_R8:
+ image->format = MESA_FORMAT_R8;
+ break;
+ case __DRI_IMAGE_FORMAT_GR88:
+ image->format = MESA_FORMAT_GR88;
+ break;
+ case __DRI_IMAGE_FORMAT_NONE:
+ image->format = MESA_FORMAT_NONE;
+ break;
+ default:
+ free(image);
+ return NULL;
+ }
+
+ image->internal_format = _mesa_get_format_base_format(image->format);
+ image->data = loaderPrivate;
+
+ return image;
+}
+
+static __DRIimage *
+intel_create_image_from_name(__DRIscreen *screen,
+ int width, int height, int format,
+ int name, int pitch, void *loaderPrivate)
+{
+ struct intel_screen *intelScreen = screen->driverPrivate;
+ __DRIimage *image;
+ int cpp;
+
+ image = intel_allocate_image(format, loaderPrivate);
+ if (image == NULL)
+ return NULL;
+
+ if (image->format == MESA_FORMAT_NONE)
+ cpp = 1;
+ else
+ cpp = _mesa_get_format_bytes(image->format);
+ image->region = intel_region_alloc_for_handle(intelScreen,
+ cpp, width, height,
+ pitch, name, "image");
+ if (image->region == NULL) {
+ FREE(image);
+ return NULL;
+ }
+
+ return image;
+}
+
+static __DRIimage *
+intel_create_image_from_renderbuffer(__DRIcontext *context,
+ int renderbuffer, void *loaderPrivate)
+{
+ __DRIimage *image;
+ struct intel_context *intel = context->driverPrivate;
+ struct gl_renderbuffer *rb;
+ struct intel_renderbuffer *irb;
+
+ rb = _mesa_lookup_renderbuffer(&intel->ctx, renderbuffer);
+ if (!rb) {
+ _mesa_error(&intel->ctx,
+ GL_INVALID_OPERATION, "glRenderbufferExternalMESA");
+ return NULL;
+ }
+
+ irb = intel_renderbuffer(rb);
+ image = CALLOC(sizeof *image);
+ if (image == NULL)
+ return NULL;
+
+ image->internal_format = rb->InternalFormat;
+ image->format = rb->Format;
+ image->offset = 0;
+ image->data = loaderPrivate;
+ intel_region_reference(&image->region, irb->mt->region);
+
+ switch (image->format) {
+ case MESA_FORMAT_RGB565:
+ image->dri_format = __DRI_IMAGE_FORMAT_RGB565;
+ break;
+ case MESA_FORMAT_XRGB8888:
+ image->dri_format = __DRI_IMAGE_FORMAT_XRGB8888;
+ break;
+ case MESA_FORMAT_ARGB8888:
+ image->dri_format = __DRI_IMAGE_FORMAT_ARGB8888;
+ break;
+ case MESA_FORMAT_RGBA8888_REV:
+ image->dri_format = __DRI_IMAGE_FORMAT_ABGR8888;
+ break;
+ case MESA_FORMAT_R8:
+ image->dri_format = __DRI_IMAGE_FORMAT_R8;
+ break;
+ case MESA_FORMAT_RG88:
+ image->dri_format = __DRI_IMAGE_FORMAT_GR88;
+ break;
+ }
+
+ return image;
+}
+
+static void
+intel_destroy_image(__DRIimage *image)
+{
+ intel_region_release(&image->region);
+ FREE(image);
+}
+
+static __DRIimage *
+intel_create_image(__DRIscreen *screen,
+ int width, int height, int format,
+ unsigned int use,
+ void *loaderPrivate)
+{
+ __DRIimage *image;
+ struct intel_screen *intelScreen = screen->driverPrivate;
+ uint32_t tiling;
+ int cpp;
+
+ tiling = I915_TILING_X;
+ if (use & __DRI_IMAGE_USE_CURSOR) {
+ if (width != 64 || height != 64)
+ return NULL;
+ tiling = I915_TILING_NONE;
+ }
+
+ image = intel_allocate_image(format, loaderPrivate);
+ if (image == NULL)
+ return NULL;
+
+ cpp = _mesa_get_format_bytes(image->format);
+ image->region =
+ intel_region_alloc(intelScreen, tiling, cpp, width, height, true);
+ if (image->region == NULL) {
+ FREE(image);
+ return NULL;
+ }
+
+ return image;
+}
+
+static GLboolean
+intel_query_image(__DRIimage *image, int attrib, int *value)
+{
+ switch (attrib) {
+ case __DRI_IMAGE_ATTRIB_STRIDE:
+ *value = image->region->pitch * image->region->cpp;
+ return true;
+ case __DRI_IMAGE_ATTRIB_HANDLE:
+ *value = image->region->bo->handle;
+ return true;
+ case __DRI_IMAGE_ATTRIB_NAME:
+ return intel_region_flink(image->region, (uint32_t *) value);
+ case __DRI_IMAGE_ATTRIB_FORMAT:
+ *value = image->dri_format;
+ return true;
+ case __DRI_IMAGE_ATTRIB_WIDTH:
+ *value = image->region->width;
+ return true;
+ case __DRI_IMAGE_ATTRIB_HEIGHT:
+ *value = image->region->height;
+ return true;
+ case __DRI_IMAGE_ATTRIB_COMPONENTS:
+ if (image->planar_format == NULL)
+ return false;
+ *value = image->planar_format->components;
+ return true;
+ default:
+ return false;
+ }
+}
+
+static __DRIimage *
+intel_dup_image(__DRIimage *orig_image, void *loaderPrivate)
+{
+ __DRIimage *image;
+
+ image = CALLOC(sizeof *image);
+ if (image == NULL)
+ return NULL;
+
+ intel_region_reference(&image->region, orig_image->region);
+ if (image->region == NULL) {
+ FREE(image);
+ return NULL;
+ }
+
+ image->internal_format = orig_image->internal_format;
+ image->planar_format = orig_image->planar_format;
+ image->dri_format = orig_image->dri_format;
+ image->format = orig_image->format;
+ image->offset = orig_image->offset;
+ image->data = loaderPrivate;
+
+ memcpy(image->strides, orig_image->strides, sizeof(image->strides));
+ memcpy(image->offsets, orig_image->offsets, sizeof(image->offsets));
+
+ return image;
+}
+
+static GLboolean
+intel_validate_usage(__DRIimage *image, unsigned int use)
+{
+ if (use & __DRI_IMAGE_USE_CURSOR) {
+ if (image->region->width != 64 || image->region->height != 64)
+ return GL_FALSE;
+ }
+
+ return GL_TRUE;
+}
+
+static __DRIimage *
+intel_create_image_from_names(__DRIscreen *screen,
+ int width, int height, int fourcc,
+ int *names, int num_names,
+ int *strides, int *offsets,
+ void *loaderPrivate)
+{
+ struct intel_image_format *f = NULL;
+ __DRIimage *image;
+ int i, index;
+
+ if (screen == NULL || names == NULL || num_names != 1)
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(intel_image_formats); i++) {
+ if (intel_image_formats[i].fourcc == fourcc) {
+ f = &intel_image_formats[i];
+ }
+ }
+
+ if (f == NULL)
+ return NULL;
+
+ image = intel_create_image_from_name(screen, width, height,
+ __DRI_IMAGE_FORMAT_NONE,
+ names[0], strides[0],
+ loaderPrivate);
+
+ if (image == NULL)
+ return NULL;
+
+ image->planar_format = f;
+ for (i = 0; i < f->nplanes; i++) {
+ index = f->planes[i].buffer_index;
+ image->offsets[index] = offsets[index];
+ image->strides[index] = strides[index];
+ }
+
+ return image;
+}
+
+static __DRIimage *
+intel_from_planar(__DRIimage *parent, int plane, void *loaderPrivate)
+{
+ int width, height, offset, stride, dri_format, cpp, index, pitch;
+ struct intel_image_format *f;
+ uint32_t mask_x, mask_y;
+ __DRIimage *image;
+
+ if (parent == NULL || parent->planar_format == NULL)
+ return NULL;
+
+ f = parent->planar_format;
+
+ if (plane >= f->nplanes)
+ return NULL;
+
+ width = parent->region->width >> f->planes[plane].width_shift;
+ height = parent->region->height >> f->planes[plane].height_shift;
+ dri_format = f->planes[plane].dri_format;
+ index = f->planes[plane].buffer_index;
+ offset = parent->offsets[index];
+ stride = parent->strides[index];
+
+ image = intel_allocate_image(dri_format, loaderPrivate);
+ if (image == NULL)
+ return NULL;
+
+ cpp = _mesa_get_format_bytes(image->format); /* safe since no NONE format */
+ pitch = stride / cpp;
+ if (offset + height * cpp * pitch > parent->region->bo->size) {
+ _mesa_warning(NULL, "intel_create_sub_image: subimage out of bounds");
+ FREE(image);
+ return NULL;
+ }
+
+ image->region = calloc(1, sizeof(*image->region));
+ if (image->region == NULL) {
+ FREE(image);
+ return NULL;
+ }
+
+ image->region->cpp = _mesa_get_format_bytes(image->format);
+ image->region->width = width;
+ image->region->height = height;
+ image->region->pitch = pitch;
+ image->region->refcount = 1;
+ image->region->bo = parent->region->bo;
+ drm_intel_bo_reference(image->region->bo);
+ image->region->tiling = parent->region->tiling;
+ image->region->screen = parent->region->screen;
+ image->offset = offset;
+
+ intel_region_get_tile_masks(image->region, &mask_x, &mask_y, false);
+ if (offset & mask_x)
+ _mesa_warning(NULL,
+ "intel_create_sub_image: offset not on tile boundary");
+
+ return image;
+}
+
+static struct __DRIimageExtensionRec intelImageExtension = {
+ { __DRI_IMAGE, 5 },
+ intel_create_image_from_name,
+ intel_create_image_from_renderbuffer,
+ intel_destroy_image,
+ intel_create_image,
+ intel_query_image,
+ intel_dup_image,
+ intel_validate_usage,
+ intel_create_image_from_names,
+ intel_from_planar
+};
+
+static const __DRIextension *intelScreenExtensions[] = {
+ &intelTexBufferExtension.base,
+ &intelFlushExtension.base,
+ &intelImageExtension.base,
+ &dri2ConfigQueryExtension.base,
+ NULL
+};
+
+static bool
+intel_get_param(__DRIscreen *psp, int param, int *value)
+{
+ int ret;
+ struct drm_i915_getparam gp;
+
+ memset(&gp, 0, sizeof(gp));
+ gp.param = param;
+ gp.value = value;
+
+ ret = drmCommandWriteRead(psp->fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
+ if (ret) {
+ if (ret != -EINVAL)
+ _mesa_warning(NULL, "drm_i915_getparam: %d", ret);
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+intel_get_boolean(__DRIscreen *psp, int param)
+{
+ int value = 0;
+ return intel_get_param(psp, param, &value) && value;
+}
+
+static void
+nop_callback(GLuint key, void *data, void *userData)
+{
+}
+
+static void
+intelDestroyScreen(__DRIscreen * sPriv)
+{
+ struct intel_screen *intelScreen = sPriv->driverPrivate;
+
+ dri_bufmgr_destroy(intelScreen->bufmgr);
+ driDestroyOptionInfo(&intelScreen->optionCache);
+
+ /* Some regions may still have references to them at this point, so
+ * flush the hash table to prevent _mesa_DeleteHashTable() from
+ * complaining about the hash not being empty.
+ */
+ _mesa_HashDeleteAll(intelScreen->named_regions, nop_callback, NULL);
+ _mesa_DeleteHashTable(intelScreen->named_regions);
+
+ FREE(intelScreen);
+ sPriv->driverPrivate = NULL;
+}
+
+
+/**
+ * This is called when we need to set up GL rendering to a new X window.
+ */
+static GLboolean
+intelCreateBuffer(__DRIscreen * driScrnPriv,
+ __DRIdrawable * driDrawPriv,
+ const struct gl_config * mesaVis, GLboolean isPixmap)
+{
+ struct intel_renderbuffer *rb;
+ struct intel_screen *screen = (struct intel_screen*) driScrnPriv->driverPrivate;
+ gl_format rgbFormat;
+ unsigned num_samples = intel_quantize_num_samples(screen, mesaVis->samples);
+ struct gl_framebuffer *fb;
+
+ if (isPixmap)
+ return false;
+
+ fb = CALLOC_STRUCT(gl_framebuffer);
+ if (!fb)
+ return false;
+
+ _mesa_initialize_window_framebuffer(fb, mesaVis);
+
+ if (mesaVis->redBits == 5)
+ rgbFormat = MESA_FORMAT_RGB565;
+ else if (mesaVis->alphaBits == 0)
+ rgbFormat = MESA_FORMAT_XRGB8888;
+ else
+ rgbFormat = MESA_FORMAT_ARGB8888;
+
+ /* setup the hardware-based renderbuffers */
+ rb = intel_create_renderbuffer(rgbFormat, num_samples);
+ _mesa_add_renderbuffer(fb, BUFFER_FRONT_LEFT, &rb->Base.Base);
+
+ if (mesaVis->doubleBufferMode) {
+ rb = intel_create_renderbuffer(rgbFormat, num_samples);
+ _mesa_add_renderbuffer(fb, BUFFER_BACK_LEFT, &rb->Base.Base);
+ }
+
+ /*
+ * Assert here that the gl_config has an expected depth/stencil bit
+ * combination: one of d24/s8, d16/s0, d0/s0. (See intelInitScreen2(),
+ * which constructs the advertised configs.)
+ */
+ if (mesaVis->depthBits == 24) {
+ assert(mesaVis->stencilBits == 8);
+
+ if (screen->hw_has_separate_stencil) {
+ rb = intel_create_private_renderbuffer(MESA_FORMAT_X8_Z24,
+ num_samples);
+ _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
+ rb = intel_create_private_renderbuffer(MESA_FORMAT_S8,
+ num_samples);
+ _mesa_add_renderbuffer(fb, BUFFER_STENCIL, &rb->Base.Base);
+ } else {
+ /*
+ * Use combined depth/stencil. Note that the renderbuffer is
+ * attached to two attachment points.
+ */
+ rb = intel_create_private_renderbuffer(MESA_FORMAT_S8_Z24,
+ num_samples);
+ _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
+ _mesa_add_renderbuffer(fb, BUFFER_STENCIL, &rb->Base.Base);
+ }
+ }
+ else if (mesaVis->depthBits == 16) {
+ assert(mesaVis->stencilBits == 0);
+ rb = intel_create_private_renderbuffer(MESA_FORMAT_Z16,
+ num_samples);
+ _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
+ }
+ else {
+ assert(mesaVis->depthBits == 0);
+ assert(mesaVis->stencilBits == 0);
+ }
+
+ /* now add any/all software-based renderbuffers we may need */
+ _swrast_add_soft_renderbuffers(fb,
+ false, /* never sw color */
+ false, /* never sw depth */
+ false, /* never sw stencil */
+ mesaVis->accumRedBits > 0,
+ false, /* never sw alpha */
+ false /* never sw aux */ );
+ driDrawPriv->driverPrivate = fb;
+
+ return true;
+}
+
+static void
+intelDestroyBuffer(__DRIdrawable * driDrawPriv)
+{
+ struct gl_framebuffer *fb = driDrawPriv->driverPrivate;
+
+ _mesa_reference_framebuffer(&fb, NULL);
+}
+
+/* There are probably better ways to do this, such as an
+ * init-designated function to register chipids and createcontext
+ * functions.
+ */
+extern bool
+i830CreateContext(const struct gl_config *mesaVis,
+ __DRIcontext *driContextPriv,
+ void *sharedContextPrivate);
+
+extern bool
+i915CreateContext(int api,
+ const struct gl_config *mesaVis,
+ __DRIcontext *driContextPriv,
+ unsigned major_version,
+ unsigned minor_version,
+ unsigned *error,
+ void *sharedContextPrivate);
+extern bool
+brwCreateContext(int api,
+ const struct gl_config *mesaVis,
+ __DRIcontext *driContextPriv,
+ unsigned major_version,
+ unsigned minor_version,
+ uint32_t flags,
+ unsigned *error,
+ void *sharedContextPrivate);
+
+static GLboolean
+intelCreateContext(gl_api api,
+ const struct gl_config * mesaVis,
+ __DRIcontext * driContextPriv,
+ unsigned major_version,
+ unsigned minor_version,
+ uint32_t flags,
+ unsigned *error,
+ void *sharedContextPrivate)
+{
+ __DRIscreen *sPriv = driContextPriv->driScreenPriv;
+ struct intel_screen *intelScreen = sPriv->driverPrivate;
+ bool success = false;
+
+#ifdef I915
+ if (IS_9XX(intelScreen->deviceID)) {
+ success = i915CreateContext(api, mesaVis, driContextPriv,
+ major_version, minor_version, error,
+ sharedContextPrivate);
+ } else {
+ /* The checks below only clear this flag on failure, so start from
+ * success; otherwise the i830 context path could never be taken.
+ */
+ success = true;
+
+ switch (api) {
+ case API_OPENGL:
+ if (major_version > 1 || minor_version > 3) {
+ *error = __DRI_CTX_ERROR_BAD_VERSION;
+ success = false;
+ }
+ break;
+ case API_OPENGLES:
+ break;
+ default:
+ *error = __DRI_CTX_ERROR_BAD_API;
+ success = false;
+ }
+
+ if (success) {
+ intelScreen->no_vbo = true;
+ success = i830CreateContext(mesaVis, driContextPriv,
+ sharedContextPrivate);
+ if (!success)
+ *error = __DRI_CTX_ERROR_NO_MEMORY;
+ }
+ }
+#else
+ success = brwCreateContext(api, mesaVis,
+ driContextPriv,
+ major_version, minor_version, flags,
+ error, sharedContextPrivate);
+#endif
+
+ if (success)
+ return true;
+
+ if (driContextPriv->driverPrivate != NULL)
+ intelDestroyContext(driContextPriv);
+
+ return false;
+}
+
+static bool
+intel_init_bufmgr(struct intel_screen *intelScreen)
+{
+ __DRIscreen *spriv = intelScreen->driScrnPriv;
+ int num_fences = 0;
+
+ intelScreen->no_hw = getenv("INTEL_NO_HW") != NULL;
+
+ intelScreen->bufmgr = intel_bufmgr_gem_init(spriv->fd, BATCH_SZ);
+ if (intelScreen->bufmgr == NULL) {
+ fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
+ __func__, __LINE__);
+ return false;
+ }
+
+ if (!intel_get_param(spriv, I915_PARAM_NUM_FENCES_AVAIL, &num_fences) ||
+ num_fences == 0) {
+ fprintf(stderr, "[%s: %u] Kernel 2.6.29 required.\n", __func__, __LINE__);
+ return false;
+ }
+
+ drm_intel_bufmgr_gem_enable_fenced_relocs(intelScreen->bufmgr);
+
+ intelScreen->named_regions = _mesa_NewHashTable();
+
+ intelScreen->relaxed_relocations = 0;
+ intelScreen->relaxed_relocations |=
+ intel_get_boolean(spriv, I915_PARAM_HAS_RELAXED_DELTA) << 0;
+
+ return true;
+}
+
+/**
+ * Override intel_screen.hw_has_separate_stencil with environment variable
+ * INTEL_SEPARATE_STENCIL.
+ *
+ * Valid values for INTEL_SEPARATE_STENCIL are "0" and "1". If an invalid
+ * value is encountered, a warning is emitted and INTEL_SEPARATE_STENCIL
+ * is ignored.
+ */
+static void
+intel_override_separate_stencil(struct intel_screen *screen)
+{
+ const char *s = getenv("INTEL_SEPARATE_STENCIL");
+ if (!s) {
+ return;
+ } else if (!strncmp("0", s, 2)) {
+ screen->hw_has_separate_stencil = false;
+ } else if (!strncmp("1", s, 2)) {
+ screen->hw_has_separate_stencil = true;
+ } else {
+ fprintf(stderr,
+ "warning: env variable INTEL_SEPARATE_STENCIL=\"%s\" has "
+ "invalid value and is ignored\n", s);
+ }
+}
+
+static bool
+intel_detect_swizzling(struct intel_screen *screen)
+{
+ drm_intel_bo *buffer;
+ unsigned long flags = 0;
+ unsigned long aligned_pitch;
+ uint32_t tiling = I915_TILING_X;
+ uint32_t swizzle_mode = 0;
+
+ buffer = drm_intel_bo_alloc_tiled(screen->bufmgr, "swizzle test",
+ 64, 64, 4,
+ &tiling, &aligned_pitch, flags);
+ if (buffer == NULL)
+ return false;
+
+ drm_intel_bo_get_tiling(buffer, &tiling, &swizzle_mode);
+ drm_intel_bo_unreference(buffer);
+
+ return swizzle_mode != I915_BIT_6_SWIZZLE_NONE;
+}
+
+static __DRIconfig**
+intel_screen_make_configs(__DRIscreen *dri_screen)
+{
+ static const GLenum back_buffer_modes[] = {
+ GLX_NONE, GLX_SWAP_UNDEFINED_OML, GLX_SWAP_COPY_OML
+ };
+
+ static const uint8_t singlesample_samples[1] = {0};
+ static const uint8_t multisample_samples[2] = {4, 8};
+
+ struct intel_screen *screen = dri_screen->driverPrivate;
+ GLenum fb_format[3];
+ GLenum fb_type[3];
+ uint8_t depth_bits[4], stencil_bits[4];
+ __DRIconfig **configs = NULL;
+
+ fb_format[0] = GL_RGB;
+ fb_type[0] = GL_UNSIGNED_SHORT_5_6_5;
+
+ fb_format[1] = GL_BGR;
+ fb_type[1] = GL_UNSIGNED_INT_8_8_8_8_REV;
+
+ fb_format[2] = GL_BGRA;
+ fb_type[2] = GL_UNSIGNED_INT_8_8_8_8_REV;
+
+ /* Generate singlesample configs without accumulation buffer. */
+ for (int i = 0; i < ARRAY_SIZE(fb_format); i++) {
+ __DRIconfig **new_configs;
+ const int num_depth_stencil_bits = 2;
+
+ /* Starting with DRI2 protocol version 1.1 we can request a depth/stencil
+ * buffer that has a different number of bits per pixel than the color
+ * buffer. This isn't yet supported here.
+ */
+ depth_bits[0] = 0;
+ stencil_bits[0] = 0;
+
+ if (fb_type[i] == GL_UNSIGNED_SHORT_5_6_5) {
+ depth_bits[1] = 16;
+ stencil_bits[1] = 0;
+ } else {
+ depth_bits[1] = 24;
+ stencil_bits[1] = 8;
+ }
+
+ new_configs = driCreateConfigs(fb_format[i], fb_type[i],
+ depth_bits,
+ stencil_bits,
+ num_depth_stencil_bits,
+ back_buffer_modes,
+ ARRAY_SIZE(back_buffer_modes),
+ singlesample_samples, 1,
+ false);
+ configs = driConcatConfigs(configs, new_configs);
+ }
+
+ /* Generate the minimum possible set of configs that include an
+ * accumulation buffer.
+ */
+ for (int i = 0; i < ARRAY_SIZE(fb_format); i++) {
+ __DRIconfig **new_configs;
+
+ if (fb_type[i] == GL_UNSIGNED_SHORT_5_6_5) {
+ depth_bits[0] = 16;
+ stencil_bits[0] = 0;
+ } else {
+ depth_bits[0] = 24;
+ stencil_bits[0] = 8;
+ }
+
+ new_configs = driCreateConfigs(fb_format[i], fb_type[i],
+ depth_bits, stencil_bits, 1,
+ back_buffer_modes + 1, 1,
+ singlesample_samples, 1,
+ true);
+ configs = driConcatConfigs(configs, new_configs);
+ }
+
+ /* Generate multisample configs.
+ *
+ * This loop breaks early, and hence is a no-op, on gen < 6.
+ *
+ * Multisample configs must follow the singlesample configs in order to
+ * work around an X server bug present in 1.12. The X server chooses to
+ * associate the first listed RGBA888-Z24S8 config, regardless of its
+ * sample count, with the 32-bit depth visual used for compositing.
+ *
+ * Only doublebuffer configs with GLX_SWAP_UNDEFINED_OML behavior are
+ * supported. Singlebuffer configs are not supported because no one wants
+ * them. GLX_SWAP_COPY_OML is not supported due to page flipping.
+ */
+ for (int i = 0; i < ARRAY_SIZE(fb_format); i++) {
+ if (screen->gen < 6)
+ break;
+
+ __DRIconfig **new_configs;
+ const int num_depth_stencil_bits = 2;
+ int num_msaa_modes = 0;
+
+ depth_bits[0] = 0;
+ stencil_bits[0] = 0;
+
+ if (fb_type[i] == GL_UNSIGNED_SHORT_5_6_5) {
+ depth_bits[1] = 16;
+ stencil_bits[1] = 0;
+ } else {
+ depth_bits[1] = 24;
+ stencil_bits[1] = 8;
+ }
+
+ if (screen->gen >= 7)
+ num_msaa_modes = 2;
+ else if (screen->gen == 6)
+ num_msaa_modes = 1;
+
+ new_configs = driCreateConfigs(fb_format[i], fb_type[i],
+ depth_bits,
+ stencil_bits,
+ num_depth_stencil_bits,
+ back_buffer_modes + 1, 1,
+ multisample_samples,
+ num_msaa_modes,
+ false);
+ configs = driConcatConfigs(configs, new_configs);
+ }
+
+ if (configs == NULL) {
+ fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
+ __LINE__);
+ return NULL;
+ }
+
+ return configs;
+}
+
+/**
+ * This is the driver specific part of the createNewScreen entry point.
+ * Called when using DRI2.
+ *
+ * \return the struct gl_config supported by this driver
+ */
+static const
+__DRIconfig **intelInitScreen2(__DRIscreen *psp)
+{
+ struct intel_screen *intelScreen;
+ unsigned int api_mask;
+
+ if (psp->dri2.loader->base.version <= 2 ||
+ psp->dri2.loader->getBuffersWithFormat == NULL) {
+ fprintf(stderr,
+ "\nERROR! DRI2 loader with getBuffersWithFormat() "
+ "support required\n");
+ return NULL;
+ }
+
+ /* Allocate the private area */
+ intelScreen = CALLOC(sizeof *intelScreen);
+ if (!intelScreen) {
+ fprintf(stderr, "\nERROR! Allocating private area failed\n");
+ return NULL;
+ }
+ /* parse information in __driConfigOptions */
+ driParseOptionInfo(&intelScreen->optionCache,
+ __driConfigOptions, __driNConfigOptions);
+
+ intelScreen->driScrnPriv = psp;
+ psp->driverPrivate = (void *) intelScreen;
+
+ if (!intel_init_bufmgr(intelScreen))
+ return NULL;
+
+ intelScreen->deviceID = drm_intel_bufmgr_gem_get_devid(intelScreen->bufmgr);
+
+ intelScreen->kernel_has_gen7_sol_reset =
+ intel_get_boolean(intelScreen->driScrnPriv,
+ I915_PARAM_HAS_GEN7_SOL_RESET);
+
+ if (IS_GEN7(intelScreen->deviceID)) {
+ intelScreen->gen = 7;
+ } else if (IS_GEN6(intelScreen->deviceID)) {
+ intelScreen->gen = 6;
+ } else if (IS_GEN5(intelScreen->deviceID)) {
+ intelScreen->gen = 5;
+ } else if (IS_965(intelScreen->deviceID)) {
+ intelScreen->gen = 4;
+ } else if (IS_9XX(intelScreen->deviceID)) {
+ intelScreen->gen = 3;
+ } else {
+ intelScreen->gen = 2;
+ }
+
+ intelScreen->hw_has_separate_stencil = intelScreen->gen >= 6;
+ intelScreen->hw_must_use_separate_stencil = intelScreen->gen >= 7;
+
+ int has_llc = 0;
+ bool success = intel_get_param(intelScreen->driScrnPriv, I915_PARAM_HAS_LLC,
+ &has_llc);
+ if (success && has_llc)
+ intelScreen->hw_has_llc = true;
+ else if (!success && intelScreen->gen >= 6)
+ intelScreen->hw_has_llc = true;
+
+ intel_override_separate_stencil(intelScreen);
+
+ api_mask = (1 << __DRI_API_OPENGL);
+#if FEATURE_ES1
+ api_mask |= (1 << __DRI_API_GLES);
+#endif
+#if FEATURE_ES2
+ api_mask |= (1 << __DRI_API_GLES2);
+#endif
+
+ if (IS_9XX(intelScreen->deviceID) || IS_965(intelScreen->deviceID))
+ psp->api_mask = api_mask;
+
+ intelScreen->hw_has_swizzling = intel_detect_swizzling(intelScreen);
+
+ psp->extensions = intelScreenExtensions;
+
+ return (const __DRIconfig**) intel_screen_make_configs(psp);
+}
+
+struct intel_buffer {
+ __DRIbuffer base;
+ struct intel_region *region;
+};
+
+static __DRIbuffer *
+intelAllocateBuffer(__DRIscreen *screen,
+ unsigned attachment, unsigned format,
+ int width, int height)
+{
+ struct intel_buffer *intelBuffer;
+ struct intel_screen *intelScreen = screen->driverPrivate;
+
+ assert(attachment == __DRI_BUFFER_FRONT_LEFT ||
+ attachment == __DRI_BUFFER_BACK_LEFT);
+
+ intelBuffer = CALLOC(sizeof *intelBuffer);
+ if (intelBuffer == NULL)
+ return NULL;
+
+ /* The front and back buffers are color buffers, which are X tiled. */
+ intelBuffer->region = intel_region_alloc(intelScreen,
+ I915_TILING_X,
+ format / 8,
+ width,
+ height,
+ true);
+
+ if (intelBuffer->region == NULL) {
+ FREE(intelBuffer);
+ return NULL;
+ }
+
+ intel_region_flink(intelBuffer->region, &intelBuffer->base.name);
+
+ intelBuffer->base.attachment = attachment;
+ intelBuffer->base.cpp = intelBuffer->region->cpp;
+ intelBuffer->base.pitch =
+ intelBuffer->region->pitch * intelBuffer->region->cpp;
+
+ return &intelBuffer->base;
+}
+
+static void
+intelReleaseBuffer(__DRIscreen *screen, __DRIbuffer *buffer)
+{
+ struct intel_buffer *intelBuffer = (struct intel_buffer *) buffer;
+
+ intel_region_release(&intelBuffer->region);
+ free(intelBuffer);
+}
+
+
+const struct __DriverAPIRec driDriverAPI = {
+ .InitScreen = intelInitScreen2,
+ .DestroyScreen = intelDestroyScreen,
+ .CreateContext = intelCreateContext,
+ .DestroyContext = intelDestroyContext,
+ .CreateBuffer = intelCreateBuffer,
+ .DestroyBuffer = intelDestroyBuffer,
+ .MakeCurrent = intelMakeCurrent,
+ .UnbindContext = intelUnbindContext,
+ .AllocateBuffer = intelAllocateBuffer,
+ .ReleaseBuffer = intelReleaseBuffer
+};
+
+/* This is the table of extensions that the loader will dlsym() for. */
+PUBLIC const __DRIextension *__driDriverExtensions[] = {
+ &driCoreExtension.base,
+ &driDRI2Extension.base,
+ NULL
+};
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_span.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_span.c
index 05e5e8e5833..d7eaa41241e 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_span.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_span.c
@@ -1 +1,215 @@
-../intel/intel_span.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright 2011 Intel Corporation
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Chad Versace <chad@chad-versace.us>
+ *
+ **************************************************************************/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "main/glheader.h"
+#include "main/macros.h"
+#include "main/mtypes.h"
+#include "main/colormac.h"
+#include "main/renderbuffer.h"
+
+#include "intel_buffers.h"
+#include "intel_fbo.h"
+#include "intel_mipmap_tree.h"
+#include "intel_screen.h"
+#include "intel_span.h"
+#include "intel_regions.h"
+#include "intel_tex.h"
+
+#include "swrast/swrast.h"
+#include "swrast/s_renderbuffer.h"
+
+/**
+ * \brief Get pointer offset into stencil buffer.
+ *
+ * The stencil buffer is W tiled. Since the GTT is incapable of W fencing, we
+ * must decode the tile's layout in software.
+ *
+ * See
+ * - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.2.1 W-Major Tile
+ * Format.
+ * - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.3 Tiling Algorithm
+ *
+ * Even though the returned offset is always positive, the return type is
+ * signed due to
+ * commit e8b1c6d6f55f5be3bef25084fdd8b6127517e137
+ * mesa: Fix return type of _mesa_get_format_bytes() (#37351)
+ */
+intptr_t
+intel_offset_S8(uint32_t stride, uint32_t x, uint32_t y, bool swizzled)
+{
+ uint32_t tile_size = 4096;
+ uint32_t tile_width = 64;
+ uint32_t tile_height = 64;
+ uint32_t row_size = 64 * stride;
+
+ uint32_t tile_x = x / tile_width;
+ uint32_t tile_y = y / tile_height;
+
+ /* The byte's address relative to the tile's base address. */
+ uint32_t byte_x = x % tile_width;
+ uint32_t byte_y = y % tile_height;
+
+ uintptr_t u = tile_y * row_size
+ + tile_x * tile_size
+ + 512 * (byte_x / 8)
+ + 64 * (byte_y / 8)
+ + 32 * ((byte_y / 4) % 2)
+ + 16 * ((byte_x / 4) % 2)
+ + 8 * ((byte_y / 2) % 2)
+ + 4 * ((byte_x / 2) % 2)
+ + 2 * (byte_y % 2)
+ + 1 * (byte_x % 2);
+
+ if (swizzled) {
+ /* adjust for bit6 swizzling */
+ if (((byte_x / 8) % 2) == 1) {
+ if (((byte_y / 8) % 2) == 0) {
+ u += 64;
+ } else {
+ u -= 64;
+ }
+ }
+ }
+
+ return u;
+}
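+
+/* Worked example (illustrative): with stride=512 and swizzled=false,
+ * the byte at (x=65, y=1) falls in tile (1, 0) with byte_x=1 and
+ * byte_y=1, giving u = 1*4096 + 2*1 + 1*1 = 4099.
+ */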
+
+/**
+ * Map the regions needed by intelSpanRenderStart().
+ */
+static void
+intel_span_map_buffers(struct intel_context *intel)
+{
+ struct gl_context *ctx = &intel->ctx;
+ struct intel_texture_object *tex_obj;
+
+ for (int i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
+ if (!ctx->Texture.Unit[i]._ReallyEnabled)
+ continue;
+ tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
+ intel_finalize_mipmap_tree(intel, i);
+ intel_tex_map_images(intel, tex_obj,
+ GL_MAP_READ_BIT | GL_MAP_WRITE_BIT);
+ }
+
+ _swrast_map_renderbuffers(ctx);
+}
+
+/**
+ * Prepare for software rendering. Map current read/draw framebuffers'
+ * renderbuffers and all currently bound texture objects.
+ *
+ * Old note: Moved locking out to get reasonable span performance.
+ */
+void
+intelSpanRenderStart(struct gl_context * ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+
+ intel_flush(ctx);
+ intel_prepare_render(intel);
+ intel_flush(ctx);
+ intel_span_map_buffers(intel);
+}
+
+/**
+ * Called when done software rendering. Unmap the buffers we mapped in
+ * the above function.
+ */
+void
+intelSpanRenderFinish(struct gl_context * ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+ GLuint i;
+
+ _swrast_flush(ctx);
+
+ for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
+ if (ctx->Texture.Unit[i]._ReallyEnabled) {
+ struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
+ intel_tex_unmap_images(intel, intel_texture_object(texObj));
+ }
+ }
+
+ _swrast_unmap_renderbuffers(ctx);
+}
+
+
+void
+intelInitSpanFuncs(struct gl_context * ctx)
+{
+ struct swrast_device_driver *swdd = _swrast_GetDeviceDriverReference(ctx);
+ if (swdd) {
+ swdd->SpanRenderStart = intelSpanRenderStart;
+ swdd->SpanRenderFinish = intelSpanRenderFinish;
+ }
+}
+
+void
+intel_map_vertex_shader_textures(struct gl_context *ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+ int i;
+
+ if (ctx->VertexProgram._Current == NULL)
+ return;
+
+ for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
+ if (ctx->Texture.Unit[i]._ReallyEnabled &&
+ ctx->VertexProgram._Current->Base.TexturesUsed[i] != 0) {
+ struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
+
+ intel_tex_map_images(intel, intel_texture_object(texObj),
+ GL_MAP_READ_BIT | GL_MAP_WRITE_BIT);
+ }
+ }
+}
+
+void
+intel_unmap_vertex_shader_textures(struct gl_context *ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+ int i;
+
+ if (ctx->VertexProgram._Current == NULL)
+ return;
+
+ for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
+ if (ctx->Texture.Unit[i]._ReallyEnabled &&
+ ctx->VertexProgram._Current->Base.TexturesUsed[i] != 0) {
+ struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
+
+ intel_tex_unmap_images(intel, intel_texture_object(texObj));
+ }
+ }
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_state.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_state.c
index 519672fc359..6a817cdf3f6 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_state.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_state.c
@@ -1 +1,195 @@
-../intel/intel_state.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "main/glheader.h"
+#include "main/context.h"
+#include "main/macros.h"
+#include "main/enums.h"
+#include "main/colormac.h"
+#include "main/dd.h"
+
+#include "intel_screen.h"
+#include "intel_context.h"
+
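+/* Note: this mapping is deliberately not the identity; each GL function
+ * is translated to the hardware comparison that produces the expected
+ * GL shadow result (hence GL_NEVER -> COMPAREFUNC_ALWAYS, GL_LESS ->
+ * COMPAREFUNC_LEQUAL, and so on). Compare intel_translate_compare_func()
+ * below, which is the direct one-to-one translation.
+ */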
+int
+intel_translate_shadow_compare_func(GLenum func)
+{
+ switch (func) {
+ case GL_NEVER:
+ return COMPAREFUNC_ALWAYS;
+ case GL_LESS:
+ return COMPAREFUNC_LEQUAL;
+ case GL_LEQUAL:
+ return COMPAREFUNC_LESS;
+ case GL_GREATER:
+ return COMPAREFUNC_GEQUAL;
+ case GL_GEQUAL:
+ return COMPAREFUNC_GREATER;
+ case GL_NOTEQUAL:
+ return COMPAREFUNC_EQUAL;
+ case GL_EQUAL:
+ return COMPAREFUNC_NOTEQUAL;
+ case GL_ALWAYS:
+ return COMPAREFUNC_NEVER;
+ }
+
+ fprintf(stderr, "Unknown value in %s: %x\n", __FUNCTION__, func);
+ return COMPAREFUNC_NEVER;
+}
+
+int
+intel_translate_compare_func(GLenum func)
+{
+ switch (func) {
+ case GL_NEVER:
+ return COMPAREFUNC_NEVER;
+ case GL_LESS:
+ return COMPAREFUNC_LESS;
+ case GL_LEQUAL:
+ return COMPAREFUNC_LEQUAL;
+ case GL_GREATER:
+ return COMPAREFUNC_GREATER;
+ case GL_GEQUAL:
+ return COMPAREFUNC_GEQUAL;
+ case GL_NOTEQUAL:
+ return COMPAREFUNC_NOTEQUAL;
+ case GL_EQUAL:
+ return COMPAREFUNC_EQUAL;
+ case GL_ALWAYS:
+ return COMPAREFUNC_ALWAYS;
+ }
+
+ fprintf(stderr, "Unknown value in %s: %x\n", __FUNCTION__, func);
+ return COMPAREFUNC_ALWAYS;
+}
+
+int
+intel_translate_stencil_op(GLenum op)
+{
+ switch (op) {
+ case GL_KEEP:
+ return STENCILOP_KEEP;
+ case GL_ZERO:
+ return STENCILOP_ZERO;
+ case GL_REPLACE:
+ return STENCILOP_REPLACE;
+ case GL_INCR:
+ return STENCILOP_INCRSAT;
+ case GL_DECR:
+ return STENCILOP_DECRSAT;
+ case GL_INCR_WRAP:
+ return STENCILOP_INCR;
+ case GL_DECR_WRAP:
+ return STENCILOP_DECR;
+ case GL_INVERT:
+ return STENCILOP_INVERT;
+ default:
+ return STENCILOP_ZERO;
+ }
+}
+
+int
+intel_translate_blend_factor(GLenum factor)
+{
+ switch (factor) {
+ case GL_ZERO:
+ return BLENDFACT_ZERO;
+ case GL_SRC_ALPHA:
+ return BLENDFACT_SRC_ALPHA;
+ case GL_ONE:
+ return BLENDFACT_ONE;
+ case GL_SRC_COLOR:
+ return BLENDFACT_SRC_COLR;
+ case GL_ONE_MINUS_SRC_COLOR:
+ return BLENDFACT_INV_SRC_COLR;
+ case GL_DST_COLOR:
+ return BLENDFACT_DST_COLR;
+ case GL_ONE_MINUS_DST_COLOR:
+ return BLENDFACT_INV_DST_COLR;
+ case GL_ONE_MINUS_SRC_ALPHA:
+ return BLENDFACT_INV_SRC_ALPHA;
+ case GL_DST_ALPHA:
+ return BLENDFACT_DST_ALPHA;
+ case GL_ONE_MINUS_DST_ALPHA:
+ return BLENDFACT_INV_DST_ALPHA;
+ case GL_SRC_ALPHA_SATURATE:
+ return BLENDFACT_SRC_ALPHA_SATURATE;
+ case GL_CONSTANT_COLOR:
+ return BLENDFACT_CONST_COLOR;
+ case GL_ONE_MINUS_CONSTANT_COLOR:
+ return BLENDFACT_INV_CONST_COLOR;
+ case GL_CONSTANT_ALPHA:
+ return BLENDFACT_CONST_ALPHA;
+ case GL_ONE_MINUS_CONSTANT_ALPHA:
+ return BLENDFACT_INV_CONST_ALPHA;
+ }
+
+ fprintf(stderr, "Unknown value in %s: %x\n", __FUNCTION__, factor);
+ return BLENDFACT_ZERO;
+}
+
+int
+intel_translate_logic_op(GLenum opcode)
+{
+ switch (opcode) {
+ case GL_CLEAR:
+ return LOGICOP_CLEAR;
+ case GL_AND:
+ return LOGICOP_AND;
+ case GL_AND_REVERSE:
+ return LOGICOP_AND_RVRSE;
+ case GL_COPY:
+ return LOGICOP_COPY;
+ case GL_COPY_INVERTED:
+ return LOGICOP_COPY_INV;
+ case GL_AND_INVERTED:
+ return LOGICOP_AND_INV;
+ case GL_NOOP:
+ return LOGICOP_NOOP;
+ case GL_XOR:
+ return LOGICOP_XOR;
+ case GL_OR:
+ return LOGICOP_OR;
+ case GL_OR_INVERTED:
+ return LOGICOP_OR_INV;
+ case GL_NOR:
+ return LOGICOP_NOR;
+ case GL_EQUIV:
+ return LOGICOP_EQUIV;
+ case GL_INVERT:
+ return LOGICOP_INV;
+ case GL_OR_REVERSE:
+ return LOGICOP_OR_RVRSE;
+ case GL_NAND:
+ return LOGICOP_NAND;
+ case GL_SET:
+ return LOGICOP_SET;
+ default:
+ return LOGICOP_SET;
+ }
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_syncobj.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_syncobj.c
index 0b2e56ab246..e965896df60 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_syncobj.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_syncobj.c
@@ -1 +1,132 @@
-../intel/intel_syncobj.c \ No newline at end of file
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/** @file intel_syncobj.c
+ *
+ * Support for ARB_sync
+ *
+ * ARB_sync is implemented by flushing the current batchbuffer and keeping a
+ * reference on it. We can then check for completion or wait for completion
+ * using the normal buffer object mechanisms. This does mean that if an
+ * application is using many sync objects, it will emit small batchbuffers
+ * which may end up being significant overhead. That said, in other
+ * experiments with removing gratuitous batchbuffer flushes in Mesa, this
+ * has not shown up as a significant performance bottleneck.
+ */
+
+#include "main/simple_list.h"
+#include "main/imports.h"
+
+#include "intel_context.h"
+#include "intel_batchbuffer.h"
+#include "intel_reg.h"
+
+static struct gl_sync_object *
+intel_new_sync_object(struct gl_context *ctx, GLuint id)
+{
+ struct intel_sync_object *sync;
+
+ sync = calloc(1, sizeof(struct intel_sync_object));
+ if (!sync)
+ return NULL;
+
+ return &sync->Base;
+}
+
+static void
+intel_delete_sync_object(struct gl_context *ctx, struct gl_sync_object *s)
+{
+ struct intel_sync_object *sync = (struct intel_sync_object *)s;
+
+ drm_intel_bo_unreference(sync->bo);
+ free(sync);
+}
+
+static void
+intel_fence_sync(struct gl_context *ctx, struct gl_sync_object *s,
+ GLenum condition, GLbitfield flags)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_sync_object *sync = (struct intel_sync_object *)s;
+
+ assert(condition == GL_SYNC_GPU_COMMANDS_COMPLETE);
+ intel_batchbuffer_emit_mi_flush(intel);
+
+ sync->bo = intel->batch.bo;
+ drm_intel_bo_reference(sync->bo);
+
+ intel_flush(ctx);
+}
+
+/* We ignore the user-supplied timeout. This is weaselly -- we're allowed to
+ * round to an implementation-dependent accuracy, and right now our
+ * implementation "rounds" to the wait-forever value.
+ *
+ * The fix would be a new kernel function to do the GTT transition with a
+ * timeout.
+ */
+static void intel_client_wait_sync(struct gl_context *ctx, struct gl_sync_object *s,
+ GLbitfield flags, GLuint64 timeout)
+{
+ struct intel_sync_object *sync = (struct intel_sync_object *)s;
+
+ if (sync->bo) {
+ drm_intel_bo_wait_rendering(sync->bo);
+ s->StatusFlag = 1;
+ drm_intel_bo_unreference(sync->bo);
+ sync->bo = NULL;
+ }
+}
+
+/* We have nothing to do for WaitSync. Our GL command stream is sequential,
+ * so given that the sync object has already flushed the batchbuffer,
+ * any batchbuffers coming after this waitsync will naturally not occur until
+ * the previous one is done.
+ */
+static void intel_server_wait_sync(struct gl_context *ctx, struct gl_sync_object *s,
+ GLbitfield flags, GLuint64 timeout)
+{
+}
+
+static void intel_check_sync(struct gl_context *ctx, struct gl_sync_object *s)
+{
+ struct intel_sync_object *sync = (struct intel_sync_object *)s;
+
+ if (sync->bo && !drm_intel_bo_busy(sync->bo)) {
+ drm_intel_bo_unreference(sync->bo);
+ sync->bo = NULL;
+ s->StatusFlag = 1;
+ }
+}
+
+void intel_init_syncobj_functions(struct dd_function_table *functions)
+{
+ functions->NewSyncObject = intel_new_sync_object;
+ functions->DeleteSyncObject = intel_delete_sync_object;
+ functions->FenceSync = intel_fence_sync;
+ functions->CheckSync = intel_check_sync;
+ functions->ClientWaitSync = intel_client_wait_sync;
+ functions->ServerWaitSync = intel_server_wait_sync;
+}
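+
+/* A minimal sketch (not part of this file) of the client-side calls that
+ * land in the hooks registered above; it assumes a context exposing
+ * GL_ARB_sync. Kept under #if 0 since this driver source never calls the
+ * GL API itself.
+ */
+#if 0
+static void example_sync_with_gpu(void)
+{
+ /* intel_fence_sync(): flushes the batch and keeps a reference to it. */
+ GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
+
+ /* intel_client_wait_sync(): blocks until that batch retires (the user
+ * timeout is currently ignored, as noted above).
+ */
+ glClientWaitSync(fence, GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
+
+ /* intel_delete_sync_object(): drops the bo reference and frees it. */
+ glDeleteSync(fence);
+}
+#endif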
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex.c
index d77ce749a3e..5d938798d9f 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex.c
@@ -1 +1,222 @@
-../intel/intel_tex.c \ No newline at end of file
+#include "swrast/swrast.h"
+#include "main/renderbuffer.h"
+#include "main/texobj.h"
+#include "main/teximage.h"
+#include "main/mipmap.h"
+#include "drivers/common/meta.h"
+#include "intel_context.h"
+#include "intel_mipmap_tree.h"
+#include "intel_tex.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+static struct gl_texture_image *
+intelNewTextureImage(struct gl_context * ctx)
+{
+ DBG("%s\n", __FUNCTION__);
+ (void) ctx;
+ return (struct gl_texture_image *) CALLOC_STRUCT(intel_texture_image);
+}
+
+static void
+intelDeleteTextureImage(struct gl_context * ctx, struct gl_texture_image *img)
+{
+ /* nothing special (yet) for intel_texture_image */
+ _mesa_delete_texture_image(ctx, img);
+}
+
+
+static struct gl_texture_object *
+intelNewTextureObject(struct gl_context * ctx, GLuint name, GLenum target)
+{
+ struct intel_texture_object *obj = CALLOC_STRUCT(intel_texture_object);
+
+ (void) ctx;
+
+ DBG("%s\n", __FUNCTION__);
+ _mesa_initialize_texture_object(&obj->base, name, target);
+
+ return &obj->base;
+}
+
+static void
+intelDeleteTextureObject(struct gl_context *ctx,
+ struct gl_texture_object *texObj)
+{
+ struct intel_texture_object *intelObj = intel_texture_object(texObj);
+
+ intel_miptree_release(&intelObj->mt);
+ _mesa_delete_texture_object(ctx, texObj);
+}
+
+static GLboolean
+intel_alloc_texture_image_buffer(struct gl_context *ctx,
+ struct gl_texture_image *image)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_image *intel_image = intel_texture_image(image);
+ struct gl_texture_object *texobj = image->TexObject;
+ struct intel_texture_object *intel_texobj = intel_texture_object(texobj);
+ GLuint slices;
+
+ assert(image->Border == 0);
+
+ /* Because the driver uses AllocTextureImageBuffer() internally, it may end
+ * up mismatched with FreeTextureImageBuffer(), but that is safe to call
+ * multiple times.
+ */
+ ctx->Driver.FreeTextureImageBuffer(ctx, image);
+
+ /* Allocate the swrast_texture_image::ImageOffsets array now */
+ switch (texobj->Target) {
+ case GL_TEXTURE_3D:
+ case GL_TEXTURE_2D_ARRAY:
+ slices = image->Depth;
+ break;
+ case GL_TEXTURE_1D_ARRAY:
+ slices = image->Height;
+ break;
+ default:
+ slices = 1;
+ }
+ assert(!intel_image->base.ImageOffsets);
+ intel_image->base.ImageOffsets = malloc(slices * sizeof(GLuint));
+
+ _swrast_init_texture_image(image);
+
+ if (intel_texobj->mt &&
+ intel_miptree_match_image(intel_texobj->mt, image)) {
+ intel_miptree_reference(&intel_image->mt, intel_texobj->mt);
+ DBG("%s: alloc obj %p level %d %dx%dx%d using object's miptree %p\n",
+ __FUNCTION__, texobj, image->Level,
+ image->Width, image->Height, image->Depth, intel_texobj->mt);
+ } else {
+ intel_image->mt = intel_miptree_create_for_teximage(intel, intel_texobj,
+ intel_image,
+ false);
+
+ /* Even if the object currently has a mipmap tree associated
+ * with it, this one is a more likely candidate to represent the
+ * whole object since our level didn't fit what was there
+ * before, and any lower levels would fit into our miptree.
+ */
+ intel_miptree_reference(&intel_texobj->mt, intel_image->mt);
+
+ DBG("%s: alloc obj %p level %d %dx%dx%d using new miptree %p\n",
+ __FUNCTION__, texobj, image->Level,
+ image->Width, image->Height, image->Depth, intel_image->mt);
+ }
+
+ return true;
+}
+
+/**
+ * Called via ctx->Driver.AllocTextureStorage()
+ * Just have to allocate memory for the texture images.
+ */
+static GLboolean
+intel_alloc_texture_storage(struct gl_context *ctx,
+ struct gl_texture_object *texObj,
+ GLsizei levels, GLsizei width,
+ GLsizei height, GLsizei depth)
+{
+ const int numFaces = _mesa_num_tex_faces(texObj->Target);
+ int face;
+ int level;
+
+ for (face = 0; face < numFaces; face++) {
+ for (level = 0; level < levels; level++) {
+ struct gl_texture_image *const texImage = texObj->Image[face][level];
+ if (!intel_alloc_texture_image_buffer(ctx, texImage))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void
+intel_free_texture_image_buffer(struct gl_context * ctx,
+ struct gl_texture_image *texImage)
+{
+ struct intel_texture_image *intelImage = intel_texture_image(texImage);
+
+ DBG("%s\n", __FUNCTION__);
+
+ intel_miptree_release(&intelImage->mt);
+
+ if (intelImage->base.Buffer) {
+ _mesa_align_free(intelImage->base.Buffer);
+ intelImage->base.Buffer = NULL;
+ }
+
+ if (intelImage->base.ImageOffsets) {
+ free(intelImage->base.ImageOffsets);
+ intelImage->base.ImageOffsets = NULL;
+ }
+}
+
+/**
+ * Map texture memory/buffer into user space.
+ * Note: the region of interest parameters are ignored here.
+ * \param mode bitmask of GL_MAP_READ_BIT, GL_MAP_WRITE_BIT
+ * \param mapOut returns start of mapping of region of interest
+ * \param rowStrideOut returns row stride in bytes
+ */
+static void
+intel_map_texture_image(struct gl_context *ctx,
+ struct gl_texture_image *tex_image,
+ GLuint slice,
+ GLuint x, GLuint y, GLuint w, GLuint h,
+ GLbitfield mode,
+ GLubyte **map,
+ GLint *stride)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_image *intel_image = intel_texture_image(tex_image);
+ struct intel_mipmap_tree *mt = intel_image->mt;
+
+ /* Our texture data is always stored in a miptree. */
+ assert(mt);
+
+ /* Check that our caller wasn't confused about how to map a 1D texture. */
+ assert(tex_image->TexObject->Target != GL_TEXTURE_1D_ARRAY ||
+ h == 1);
+
+ /* intel_miptree_map operates on a unified "slice" number that references the
+ * cube face, since it's all just slices to the miptree code.
+ */
+ if (tex_image->TexObject->Target == GL_TEXTURE_CUBE_MAP)
+ slice = tex_image->Face;
+
+ intel_miptree_map(intel, mt, tex_image->Level, slice, x, y, w, h, mode,
+ (void **)map, stride);
+}
+
+static void
+intel_unmap_texture_image(struct gl_context *ctx,
+ struct gl_texture_image *tex_image, GLuint slice)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_image *intel_image = intel_texture_image(tex_image);
+ struct intel_mipmap_tree *mt = intel_image->mt;
+
+ if (tex_image->TexObject->Target == GL_TEXTURE_CUBE_MAP)
+ slice = tex_image->Face;
+
+ intel_miptree_unmap(intel, mt, tex_image->Level, slice);
+}
+
+void
+intelInitTextureFuncs(struct dd_function_table *functions)
+{
+ functions->NewTextureObject = intelNewTextureObject;
+ functions->NewTextureImage = intelNewTextureImage;
+ functions->DeleteTextureImage = intelDeleteTextureImage;
+ functions->DeleteTexture = intelDeleteTextureObject;
+ functions->AllocTextureImageBuffer = intel_alloc_texture_image_buffer;
+ functions->FreeTextureImageBuffer = intel_free_texture_image_buffer;
+ functions->AllocTextureStorage = intel_alloc_texture_storage;
+ functions->MapTextureImage = intel_map_texture_image;
+ functions->UnmapTextureImage = intel_unmap_texture_image;
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_copy.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_copy.c
index 87196c5d1ed..f4366330f88 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_copy.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_copy.c
@@ -1 +1,171 @@
-../intel/intel_tex_copy.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/mtypes.h"
+#include "main/enums.h"
+#include "main/image.h"
+#include "main/teximage.h"
+#include "main/texstate.h"
+#include "main/fbobject.h"
+
+#include "drivers/common/meta.h"
+
+#include "intel_screen.h"
+#include "intel_context.h"
+#include "intel_mipmap_tree.h"
+#include "intel_regions.h"
+#include "intel_fbo.h"
+#include "intel_tex.h"
+#include "intel_blit.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+
+bool
+intel_copy_texsubimage(struct intel_context *intel,
+ struct intel_texture_image *intelImage,
+ GLint dstx, GLint dsty,
+ struct intel_renderbuffer *irb,
+ GLint x, GLint y, GLsizei width, GLsizei height)
+{
+ struct gl_context *ctx = &intel->ctx;
+ struct intel_region *region;
+ const GLenum internalFormat = intelImage->base.Base.InternalFormat;
+ bool copy_supported = false;
+ bool copy_supported_with_alpha_override = false;
+
+ intel_prepare_render(intel);
+
+ if (!intelImage->mt || !irb || !irb->mt) {
+ if (unlikely(INTEL_DEBUG & DEBUG_PERF))
+ fprintf(stderr, "%s fail %p %p (0x%08x)\n",
+ __FUNCTION__, intelImage->mt, irb, internalFormat);
+ return false;
+ } else {
+ region = irb->mt->region;
+ assert(region);
+ }
+
+ copy_supported = intelImage->base.Base.TexFormat == intel_rb_format(irb);
+
+ /* Converting ARGB8888 to XRGB8888 is trivial: ignore the alpha bits */
+ if (intel_rb_format(irb) == MESA_FORMAT_ARGB8888 &&
+ intelImage->base.Base.TexFormat == MESA_FORMAT_XRGB8888) {
+ copy_supported = true;
+ }
+
+ /* Converting XRGB8888 to ARGB8888 requires setting the alpha bits to 1.0 */
+ if (intel_rb_format(irb) == MESA_FORMAT_XRGB8888 &&
+ intelImage->base.Base.TexFormat == MESA_FORMAT_ARGB8888) {
+ copy_supported_with_alpha_override = true;
+ }
+
+ if (!copy_supported && !copy_supported_with_alpha_override) {
+ if (unlikely(INTEL_DEBUG & DEBUG_PERF))
+ fprintf(stderr, "%s mismatched formats %s, %s\n",
+ __FUNCTION__,
+ _mesa_get_format_name(intelImage->base.Base.TexFormat),
+ _mesa_get_format_name(intel_rb_format(irb)));
+ return false;
+ }
+
+ {
+ GLuint image_x, image_y;
+ GLshort src_pitch;
+
+ /* get dest x/y in destination texture */
+ intel_miptree_get_image_offset(intelImage->mt,
+ intelImage->base.Base.Level,
+ intelImage->base.Base.Face,
+ 0,
+ &image_x, &image_y);
+
+ /* The blitter can't handle Y-tiled buffers. */
+ if (intelImage->mt->region->tiling == I915_TILING_Y) {
+ return false;
+ }
+
+ if (_mesa_is_winsys_fbo(ctx->ReadBuffer)) {
+ /* Flip vertical orientation for system framebuffers */
+ y = ctx->ReadBuffer->Height - (y + height);
+ src_pitch = -region->pitch;
+ } else {
+ /* reading from a FBO, y is already oriented the way we like */
+ src_pitch = region->pitch;
+ }
+
+ /* blit from src buffer to texture */
+ if (!intelEmitCopyBlit(intel,
+ intelImage->mt->cpp,
+ src_pitch,
+ region->bo,
+ 0,
+ region->tiling,
+ intelImage->mt->region->pitch,
+ intelImage->mt->region->bo,
+ 0,
+ intelImage->mt->region->tiling,
+ irb->draw_x + x, irb->draw_y + y,
+ image_x + dstx, image_y + dsty,
+ width, height,
+ GL_COPY)) {
+ return false;
+ }
+ }
+
+ if (copy_supported_with_alpha_override)
+ intel_set_teximage_alpha_to_one(ctx, intelImage);
+
+ return true;
+}
+
+
+static void
+intelCopyTexSubImage(struct gl_context *ctx, GLuint dims,
+ struct gl_texture_image *texImage,
+ GLint xoffset, GLint yoffset, GLint zoffset,
+ struct gl_renderbuffer *rb,
+ GLint x, GLint y,
+ GLsizei width, GLsizei height)
+{
+ if (dims == 3 || !intel_copy_texsubimage(intel_context(ctx),
+ intel_texture_image(texImage),
+ xoffset, yoffset,
+ intel_renderbuffer(rb), x, y, width, height)) {
+ fallback_debug("%s - fallback to swrast\n", __FUNCTION__);
+ _mesa_meta_CopyTexSubImage(ctx, dims, texImage,
+ xoffset, yoffset, zoffset,
+ rb, x, y, width, height);
+ }
+}
+
+
+void
+intelInitTextureCopyImageFuncs(struct dd_function_table *functions)
+{
+ functions->CopyTexSubImage = intelCopyTexSubImage;
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_format.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_format.c
index 3415f754705..f53054d55d3 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_format.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_format.c
@@ -1 +1,65 @@
-../intel/intel_tex_format.c \ No newline at end of file
+#include "intel_context.h"
+#include "intel_tex.h"
+#include "main/enums.h"
+#include "main/formats.h"
+
+/**
+ * Returns the renderbuffer DataType for a MESA_FORMAT.
+ */
+GLenum
+intel_mesa_format_to_rb_datatype(gl_format format)
+{
+ switch (format) {
+ case MESA_FORMAT_ARGB8888:
+ case MESA_FORMAT_XRGB8888:
+ case MESA_FORMAT_SARGB8:
+ case MESA_FORMAT_R8:
+ case MESA_FORMAT_GR88:
+ case MESA_FORMAT_A8:
+ case MESA_FORMAT_I8:
+ case MESA_FORMAT_L8:
+ case MESA_FORMAT_AL88:
+ case MESA_FORMAT_RGB565:
+ case MESA_FORMAT_ARGB1555:
+ case MESA_FORMAT_ARGB4444:
+ case MESA_FORMAT_S8:
+ return GL_UNSIGNED_BYTE;
+ case MESA_FORMAT_R16:
+ case MESA_FORMAT_RG1616:
+ case MESA_FORMAT_Z16:
+ return GL_UNSIGNED_SHORT;
+ case MESA_FORMAT_X8_Z24:
+ return GL_UNSIGNED_INT;
+ case MESA_FORMAT_S8_Z24:
+ return GL_UNSIGNED_INT_24_8_EXT;
+ case MESA_FORMAT_RGBA_FLOAT32:
+ case MESA_FORMAT_RG_FLOAT32:
+ case MESA_FORMAT_R_FLOAT32:
+ case MESA_FORMAT_INTENSITY_FLOAT32:
+ case MESA_FORMAT_LUMINANCE_FLOAT32:
+ case MESA_FORMAT_ALPHA_FLOAT32:
+ case MESA_FORMAT_LUMINANCE_ALPHA_FLOAT32:
+ return GL_FLOAT;
+
+ /* The core depthstencil wrappers demand this. */
+ case MESA_FORMAT_Z32_FLOAT_X24S8:
+ return GL_FLOAT_32_UNSIGNED_INT_24_8_REV;
+
+ default:
+ /* Unsupported format. We may hit this when people ask for FBO-incomplete
+ * formats.
+ */
+ return GL_NONE;
+ }
+}
+
+int intel_compressed_num_bytes(GLuint mesaFormat)
+{
+ GLuint bw, bh;
+ GLuint block_size;
+
+ block_size = _mesa_get_format_bytes(mesaFormat);
+ _mesa_get_format_block_size(mesaFormat, &bw, &bh);
+
+ return block_size / bw;
+}
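+
+/* Worked example (block sizes assumed from the standard compressed formats):
+ * DXT1/BC1 stores 4x4 blocks in 8 bytes, so this returns 8 / 4 = 2; FXT1
+ * stores 8x4 blocks in 16 bytes and returns 16 / 8 = 2 as well. A block row
+ * spanning W texels therefore occupies W * 2 bytes, which is what callers
+ * use this value for when computing pitches.
+ */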
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_image.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_image.c
index 567abe4974e..fe9040cf1b6 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_image.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_image.c
@@ -1 +1,360 @@
-../intel/intel_tex_image.c \ No newline at end of file
+
+#include "main/glheader.h"
+#include "main/macros.h"
+#include "main/mfeatures.h"
+#include "main/mtypes.h"
+#include "main/enums.h"
+#include "main/bufferobj.h"
+#include "main/context.h"
+#include "main/formats.h"
+#include "main/pbo.h"
+#include "main/renderbuffer.h"
+#include "main/texcompress.h"
+#include "main/texgetimage.h"
+#include "main/texobj.h"
+#include "main/teximage.h"
+#include "main/texstore.h"
+
+#include "intel_context.h"
+#include "intel_mipmap_tree.h"
+#include "intel_buffer_objects.h"
+#include "intel_batchbuffer.h"
+#include "intel_tex.h"
+#include "intel_blit.h"
+#include "intel_fbo.h"
+#include "intel_span.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+/* Work back from the specified level of the image to the baselevel and create a
+ * miptree of that size.
+ */
+struct intel_mipmap_tree *
+intel_miptree_create_for_teximage(struct intel_context *intel,
+ struct intel_texture_object *intelObj,
+ struct intel_texture_image *intelImage,
+ bool expect_accelerated_upload)
+{
+ GLuint firstLevel;
+ GLuint lastLevel;
+ int width, height, depth;
+ GLuint i;
+
+ intel_miptree_get_dimensions_for_image(&intelImage->base.Base,
+ &width, &height, &depth);
+
+ DBG("%s\n", __FUNCTION__);
+
+ if (intelImage->base.Base.Level > intelObj->base.BaseLevel &&
+ (width == 1 ||
+ (intelObj->base.Target != GL_TEXTURE_1D && height == 1) ||
+ (intelObj->base.Target == GL_TEXTURE_3D && depth == 1))) {
+ /* For this combination, we're at some lower mipmap level and
+ * some important dimension is 1. We can't extrapolate up to a
+ * likely base level width/height/depth for a full mipmap stack
+ * from this info, so just allocate this one level.
+ */
+ firstLevel = intelImage->base.Base.Level;
+ lastLevel = intelImage->base.Base.Level;
+ } else {
+ /* If this image disrespects BaseLevel, allocate from level zero.
+ * Usually BaseLevel == 0, so it's unlikely to happen.
+ */
+ if (intelImage->base.Base.Level < intelObj->base.BaseLevel)
+ firstLevel = 0;
+ else
+ firstLevel = intelObj->base.BaseLevel;
+
+ /* Figure out image dimensions at start level. */
+ for (i = intelImage->base.Base.Level; i > firstLevel; i--) {
+ width <<= 1;
+ if (height != 1)
+ height <<= 1;
+ if (depth != 1)
+ depth <<= 1;
+ }
+
+ /* Guess a reasonable value for lastLevel. This is probably going
+ * to be wrong fairly often and might mean that we have to look at
+ * resizable buffers, or require that buffers implement lazy
+ * pagetable arrangements.
+ */
+ if ((intelObj->base.Sampler.MinFilter == GL_NEAREST ||
+ intelObj->base.Sampler.MinFilter == GL_LINEAR) &&
+ intelImage->base.Base.Level == firstLevel &&
+ (intel->gen < 4 || firstLevel == 0)) {
+ lastLevel = firstLevel;
+ } else if (intelObj->base.Target == GL_TEXTURE_EXTERNAL_OES) {
+ lastLevel = firstLevel;
+ } else {
+ lastLevel = firstLevel + _mesa_logbase2(MAX2(MAX2(width, height), depth));
+ }
+ }
+
+ return intel_miptree_create(intel,
+ intelObj->base.Target,
+ intelImage->base.Base.TexFormat,
+ firstLevel,
+ lastLevel,
+ width,
+ height,
+ depth,
+ expect_accelerated_upload,
+ 0 /* num_samples */,
+ INTEL_MSAA_LAYOUT_NONE);
+}
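+
+/* Worked example of the back-off logic above (sizes assumed): uploading
+ * glTexImage2D(GL_TEXTURE_2D, 2, ..., 16, 16, ...) with BaseLevel 0 and a
+ * mipmapping MinFilter gives firstLevel = 0, the loop doubles the dimensions
+ * twice to a guessed base of 64x64, and lastLevel = 0 + log2(64) = 6, so a
+ * full 64x64 .. 1x1 stack is allocated up front.
+ */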
+
+/* There are actually quite a few combinations this will work for,
+ * more than what I've listed here.
+ */
+static bool
+check_pbo_format(GLenum format, GLenum type,
+ gl_format mesa_format)
+{
+ switch (mesa_format) {
+ case MESA_FORMAT_ARGB8888:
+ return (format == GL_BGRA && (type == GL_UNSIGNED_BYTE ||
+ type == GL_UNSIGNED_INT_8_8_8_8_REV));
+ case MESA_FORMAT_RGB565:
+ return (format == GL_RGB && type == GL_UNSIGNED_SHORT_5_6_5);
+ case MESA_FORMAT_L8:
+ return (format == GL_LUMINANCE && type == GL_UNSIGNED_BYTE);
+ case MESA_FORMAT_YCBCR:
+ return (type == GL_UNSIGNED_SHORT_8_8_MESA || type == GL_UNSIGNED_BYTE);
+ default:
+ return false;
+ }
+}
+
+
+/* XXX: Do this for TexSubImage also:
+ */
+static bool
+try_pbo_upload(struct gl_context *ctx,
+ struct gl_texture_image *image,
+ const struct gl_pixelstore_attrib *unpack,
+ GLenum format, GLenum type, const void *pixels)
+{
+ struct intel_texture_image *intelImage = intel_texture_image(image);
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *pbo = intel_buffer_object(unpack->BufferObj);
+ GLuint src_offset, src_stride;
+ GLuint dst_x, dst_y, dst_stride;
+ drm_intel_bo *dst_buffer, *src_buffer;
+
+ if (!_mesa_is_bufferobj(unpack->BufferObj))
+ return false;
+
+ DBG("trying pbo upload\n");
+
+ if (intel->ctx._ImageTransferState ||
+ unpack->SkipPixels || unpack->SkipRows) {
+ DBG("%s: image transfer\n", __FUNCTION__);
+ return false;
+ }
+
+ if (!check_pbo_format(format, type, intelImage->base.Base.TexFormat)) {
+ DBG("%s: format mismatch (upload to %s with format 0x%x, type 0x%x)\n",
+ __FUNCTION__, _mesa_get_format_name(intelImage->base.Base.TexFormat),
+ format, type);
+ return false;
+ }
+
+ ctx->Driver.AllocTextureImageBuffer(ctx, image);
+
+ if (!intelImage->mt) {
+ DBG("%s: no miptree\n", __FUNCTION__);
+ return false;
+ }
+
+ dst_buffer = intelImage->mt->region->bo;
+ src_buffer = intel_bufferobj_source(intel, pbo, 64, &src_offset);
+ /* note: potential 64-bit ptr to 32-bit int cast */
+ src_offset += (GLuint) (unsigned long) pixels;
+
+ if (unpack->RowLength > 0)
+ src_stride = unpack->RowLength;
+ else
+ src_stride = image->Width;
+
+ intel_miptree_get_image_offset(intelImage->mt, intelImage->base.Base.Level,
+ intelImage->base.Base.Face, 0,
+ &dst_x, &dst_y);
+
+ dst_stride = intelImage->mt->region->pitch;
+
+ if (!intelEmitCopyBlit(intel,
+ intelImage->mt->cpp,
+ src_stride, src_buffer,
+ src_offset, false,
+ dst_stride, dst_buffer, 0,
+ intelImage->mt->region->tiling,
+ 0, 0, dst_x, dst_y, image->Width, image->Height,
+ GL_COPY)) {
+ DBG("%s: blit failed\n", __FUNCTION__);
+ return false;
+ }
+
+ DBG("%s: success\n", __FUNCTION__);
+ return true;
+}
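+
+/* An illustrative GL-side sequence (not part of this file) that can take the
+ * blitted PBO path above, assuming the texture resolves to
+ * MESA_FORMAT_ARGB8888 so check_pbo_format() accepts BGRA/UNSIGNED_BYTE:
+ */
+#if 0
+static void example_pbo_teximage(GLuint pbo, int w, int h)
+{
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
+ /* With an unpack buffer bound, "pixels" is a byte offset into the PBO. */
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, w, h, 0,
+ GL_BGRA, GL_UNSIGNED_BYTE, (const void *) 0);
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
+}
+#endif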
+
+static void
+intelTexImage(struct gl_context * ctx,
+ GLuint dims,
+ struct gl_texture_image *texImage,
+ GLenum format, GLenum type, const void *pixels,
+ const struct gl_pixelstore_attrib *unpack)
+{
+ DBG("%s target %s level %d %dx%dx%d\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr(texImage->TexObject->Target),
+ texImage->Level, texImage->Width, texImage->Height, texImage->Depth);
+
+ /* Attempt to use the blitter for PBO image uploads.
+ */
+ if (dims <= 2 &&
+ try_pbo_upload(ctx, texImage, unpack, format, type, pixels)) {
+ return;
+ }
+
+ DBG("%s: upload image %dx%dx%d pixels %p\n",
+ __FUNCTION__, texImage->Width, texImage->Height, texImage->Depth,
+ pixels);
+
+ _mesa_store_teximage(ctx, dims, texImage,
+ format, type, pixels, unpack);
+}
+
+
+/**
+ * Binds a region to a texture image, as if it had been uploaded by
+ * glTexImage2D().
+ *
+ * Used for GLX_EXT_texture_from_pixmap and the EGL image extensions.
+ */
+static void
+intel_set_texture_image_region(struct gl_context *ctx,
+ struct gl_texture_image *image,
+ struct intel_region *region,
+ GLenum target,
+ GLenum internalFormat,
+ gl_format format,
+ uint32_t offset)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_image *intel_image = intel_texture_image(image);
+ struct gl_texture_object *texobj = image->TexObject;
+ struct intel_texture_object *intel_texobj = intel_texture_object(texobj);
+
+ _mesa_init_teximage_fields(&intel->ctx, image,
+ region->width, region->height, 1,
+ 0, internalFormat, format);
+
+ ctx->Driver.FreeTextureImageBuffer(ctx, image);
+
+ intel_image->mt = intel_miptree_create_for_region(intel, target,
+ image->TexFormat,
+ region);
+ if (intel_image->mt == NULL)
+ return;
+
+ intel_image->mt->offset = offset;
+ intel_image->base.RowStride = region->pitch;
+
+ /* Immediately validate the image to the object. */
+ intel_miptree_reference(&intel_texobj->mt, intel_image->mt);
+}
+
+void
+intelSetTexBuffer2(__DRIcontext *pDRICtx, GLint target,
+ GLint texture_format,
+ __DRIdrawable *dPriv)
+{
+ struct gl_framebuffer *fb = dPriv->driverPrivate;
+ struct intel_context *intel = pDRICtx->driverPrivate;
+ struct gl_context *ctx = &intel->ctx;
+ struct intel_texture_object *intelObj;
+ struct intel_renderbuffer *rb;
+ struct gl_texture_object *texObj;
+ struct gl_texture_image *texImage;
+ int level = 0, internalFormat = 0;
+ gl_format texFormat = MESA_FORMAT_NONE;
+
+ texObj = _mesa_get_current_tex_object(ctx, target);
+ intelObj = intel_texture_object(texObj);
+
+ if (!intelObj)
+ return;
+
+ if (dPriv->lastStamp != dPriv->dri2.stamp ||
+ !pDRICtx->driScreenPriv->dri2.useInvalidate)
+ intel_update_renderbuffers(pDRICtx, dPriv);
+
+ rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
+ /* If the region isn't set, then intel_update_renderbuffers was unable
+ * to get the buffers for the drawable.
+ */
+ if (!rb || !rb->mt)
+ return;
+
+ if (rb->mt->cpp == 4) {
+ if (texture_format == __DRI_TEXTURE_FORMAT_RGB) {
+ internalFormat = GL_RGB;
+ texFormat = MESA_FORMAT_XRGB8888;
+ }
+ else {
+ internalFormat = GL_RGBA;
+ texFormat = MESA_FORMAT_ARGB8888;
+ }
+ } else if (rb->mt->cpp == 2) {
+ internalFormat = GL_RGB;
+ texFormat = MESA_FORMAT_RGB565;
+ }
+
+ _mesa_lock_texture(&intel->ctx, texObj);
+ texImage = _mesa_get_tex_image(ctx, texObj, target, level);
+ intel_set_texture_image_region(ctx, texImage, rb->mt->region, target,
+ internalFormat, texFormat, 0);
+ _mesa_unlock_texture(&intel->ctx, texObj);
+}
+
+void
+intelSetTexBuffer(__DRIcontext *pDRICtx, GLint target, __DRIdrawable *dPriv)
+{
+ /* The old interface didn't have the format argument, so copy our
+ * implementation's behavior at the time.
+ */
+ intelSetTexBuffer2(pDRICtx, target, __DRI_TEXTURE_FORMAT_RGBA, dPriv);
+}
+
+#if FEATURE_OES_EGL_image
+static void
+intel_image_target_texture_2d(struct gl_context *ctx, GLenum target,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage,
+ GLeglImageOES image_handle)
+{
+ struct intel_context *intel = intel_context(ctx);
+ __DRIscreen *screen;
+ __DRIimage *image;
+
+ screen = intel->intelScreen->driScrnPriv;
+ image = screen->dri2.image->lookupEGLImage(screen, image_handle,
+ screen->loaderPrivate);
+ if (image == NULL)
+ return;
+
+ intel_set_texture_image_region(ctx, texImage, image->region,
+ target, image->internal_format,
+ image->format, image->offset);
+}
+#endif
+
+void
+intelInitTextureImageFuncs(struct dd_function_table *functions)
+{
+ functions->TexImage = intelTexImage;
+
+#if FEATURE_OES_EGL_image
+ functions->EGLImageTargetTexture2D = intel_image_target_texture_2d;
+#endif
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_layout.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_layout.c
index fe61b441945..65645bc46a4 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_layout.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_layout.c
@@ -1 +1,206 @@
-../intel/intel_tex_layout.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ * Michel Dänzer <michel@tungstengraphics.com>
+ */
+
+#include "intel_mipmap_tree.h"
+#include "intel_tex_layout.h"
+#include "intel_context.h"
+
+#include "main/image.h"
+#include "main/macros.h"
+
+static unsigned int
+intel_horizontal_texture_alignment_unit(struct intel_context *intel,
+ gl_format format)
+{
+ /**
+ * From the "Alignment Unit Size" section of various specs, namely:
+ * - Gen3 Spec: "Memory Data Formats" Volume, Section 1.20.1.4
+ * - i965 and G45 PRMs: Volume 1, Section 6.17.3.4.
+ * - Ironlake and Sandybridge PRMs: Volume 1, Part 1, Section 7.18.3.4
+ * - BSpec (for Ivybridge and slight variations in separate stencil)
+ *
+ * +----------------------------------------------------------------------+
+ * | | alignment unit width ("i") |
+ * | Surface Property |-----------------------------|
+ * | | 915 | 965 | ILK | SNB | IVB |
+ * +----------------------------------------------------------------------+
+ * | YUV 4:2:2 format | 8 | 4 | 4 | 4 | 4 |
+ * | BC1-5 compressed format (DXTn/S3TC) | 4 | 4 | 4 | 4 | 4 |
+ * | FXT1 compressed format | 8 | 8 | 8 | 8 | 8 |
+ * | Depth Buffer (16-bit) | 4 | 4 | 4 | 4 | 8 |
+ * | Depth Buffer (other) | 4 | 4 | 4 | 4 | 4 |
+ * | Separate Stencil Buffer | N/A | N/A | 8 | 8 | 8 |
+ * | All Others | 4 | 4 | 4 | 4 | 4 |
+ * +----------------------------------------------------------------------+
+ *
+ * On IVB+, non-special cases can be overridden by setting the SURFACE_STATE
+ * "Surface Horizontal Alignment" field to HALIGN_4 or HALIGN_8.
+ */
+ if (_mesa_is_format_compressed(format)) {
+ /* The hardware alignment requirements for compressed textures
+ * happen to match the block boundaries.
+ */
+ unsigned int i, j;
+ _mesa_get_format_block_size(format, &i, &j);
+ return i;
+ }
+
+ if (format == MESA_FORMAT_S8)
+ return 8;
+
+ if (intel->gen >= 7 && format == MESA_FORMAT_Z16)
+ return 8;
+
+ return 4;
+}
+
+static unsigned int
+intel_vertical_texture_alignment_unit(struct intel_context *intel,
+ gl_format format)
+{
+ /**
+ * From the "Alignment Unit Size" section of various specs, namely:
+ * - Gen3 Spec: "Memory Data Formats" Volume, Section 1.20.1.4
+ * - i965 and G45 PRMs: Volume 1, Section 6.17.3.4.
+ * - Ironlake and Sandybridge PRMs: Volume 1, Part 1, Section 7.18.3.4
+ * - BSpec (for Ivybridge and slight variations in separate stencil)
+ *
+ * +----------------------------------------------------------------------+
+ * | | alignment unit height ("j") |
+ * | Surface Property |-----------------------------|
+ * | | 915 | 965 | ILK | SNB | IVB |
+ * +----------------------------------------------------------------------+
+ * | BC1-5 compressed format (DXTn/S3TC) | 4 | 4 | 4 | 4 | 4 |
+ * | FXT1 compressed format | 4 | 4 | 4 | 4 | 4 |
+ * | Depth Buffer | 2 | 2 | 2 | 4 | 4 |
+ * | Separate Stencil Buffer | N/A | N/A | N/A | 4 | 8 |
+ * | Multisampled (4x or 8x) render target | N/A | N/A | N/A | 4 | 4 |
+ * | All Others | 2 | 2 | 2 | 2 | 2 |
+ * +----------------------------------------------------------------------+
+ *
+ * On SNB+, non-special cases can be overridden by setting the SURFACE_STATE
+ * "Surface Vertical Alignment" field to VALIGN_2 or VALIGN_4.
+ *
+ * We currently don't support multisampling.
+ */
+ if (_mesa_is_format_compressed(format))
+ return 4;
+
+ if (format == MESA_FORMAT_S8)
+ return intel->gen >= 7 ? 8 : 4;
+
+ GLenum base_format = _mesa_get_format_base_format(format);
+
+ if (intel->gen >= 6 &&
+ (base_format == GL_DEPTH_COMPONENT ||
+ base_format == GL_DEPTH_STENCIL)) {
+ return 4;
+ }
+
+ return 2;
+}
+
+void
+intel_get_texture_alignment_unit(struct intel_context *intel,
+ gl_format format,
+ unsigned int *w, unsigned int *h)
+{
+ *w = intel_horizontal_texture_alignment_unit(intel, format);
+ *h = intel_vertical_texture_alignment_unit(intel, format);
+}
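+
+/* Example readings of the tables above: on gen7, MESA_FORMAT_Z16 gets
+ * w = 8 (the IVB 16-bit depth special case) and h = 4 (depth on gen6+),
+ * while an uncompressed color format such as MESA_FORMAT_ARGB8888 gets the
+ * default w = 4, h = 2 on every generation listed.
+ */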
+
+void i945_miptree_layout_2d(struct intel_mipmap_tree *mt)
+{
+ GLuint level;
+ GLuint x = 0;
+ GLuint y = 0;
+ GLuint width = mt->width0;
+ GLuint height = mt->height0;
+ GLuint depth = mt->depth0; /* number of array layers. */
+
+ mt->total_width = mt->width0;
+
+ if (mt->compressed) {
+ mt->total_width = ALIGN(mt->width0, mt->align_w);
+ }
+
+ /* May need to adjust width to accommodate the placement of
+ * the 2nd mipmap. This occurs when the alignment
+ * constraints of mipmap placement push the right edge of the
+ * 2nd mipmap out past the width of its parent.
+ */
+ if (mt->first_level != mt->last_level) {
+ GLuint mip1_width;
+
+ if (mt->compressed) {
+ mip1_width = ALIGN(minify(mt->width0), mt->align_w)
+ + ALIGN(minify(minify(mt->width0)), mt->align_w);
+ } else {
+ mip1_width = ALIGN(minify(mt->width0), mt->align_w)
+ + minify(minify(mt->width0));
+ }
+
+ if (mip1_width > mt->total_width) {
+ mt->total_width = mip1_width;
+ }
+ }
+
+ mt->total_height = 0;
+
+ for ( level = mt->first_level ; level <= mt->last_level ; level++ ) {
+ GLuint img_height;
+
+ intel_miptree_set_level_info(mt, level, x, y, width,
+ height, depth);
+
+ img_height = ALIGN(height, mt->align_h);
+ if (mt->compressed)
+ img_height /= mt->align_h;
+
+ /* Because the images are packed better, the final offset
+ * might not be the maximal one:
+ */
+ mt->total_height = MAX2(mt->total_height, y + img_height);
+
+ /* Layout_below: step right after second mipmap.
+ */
+ if (level == mt->first_level + 1) {
+ x += ALIGN(width, mt->align_w);
+ }
+ else {
+ y += img_height;
+ }
+
+ width = minify(width);
+ height = minify(height);
+ }
+}
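+
+/* Worked example of the layout above (an uncompressed 8x8 texture with four
+ * levels, align_w = 4, align_h = 2 assumed): level 0 lands at (0,0); level 1
+ * (4x4) goes below it at (0,8); level 2 (2x2) steps right to (4,8); level 3
+ * (1x1) continues downward at (4,10). total_width stays 8 and total_height
+ * ends up 12.
+ */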
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_subimage.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_subimage.c
index b3a8a3d7ca7..ae4b3bca305 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_subimage.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_subimage.c
@@ -1 +1,177 @@
-../intel/intel_tex_subimage.c \ No newline at end of file
+
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/mtypes.h"
+#include "main/pbo.h"
+#include "main/texobj.h"
+#include "main/texstore.h"
+#include "main/texcompress.h"
+#include "main/enums.h"
+
+#include "intel_context.h"
+#include "intel_tex.h"
+#include "intel_mipmap_tree.h"
+#include "intel_blit.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+static bool
+intel_blit_texsubimage(struct gl_context * ctx,
+ struct gl_texture_image *texImage,
+ GLint xoffset, GLint yoffset,
+ GLint width, GLint height,
+ GLenum format, GLenum type, const void *pixels,
+ const struct gl_pixelstore_attrib *packing)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_image *intelImage = intel_texture_image(texImage);
+ GLuint dstRowStride = 0;
+ drm_intel_bo *temp_bo = NULL;
+ unsigned int blit_x = 0, blit_y = 0;
+ unsigned long pitch;
+ uint32_t tiling_mode = I915_TILING_NONE;
+ GLubyte *dstMap;
+
+ /* Try to do a blit upload of the subimage if the texture is
+ * currently busy.
+ */
+ if (!intelImage->mt)
+ return false;
+
+ /* The blitter can't handle Y tiling */
+ if (intelImage->mt->region->tiling == I915_TILING_Y)
+ return false;
+
+ if (texImage->TexObject->Target != GL_TEXTURE_2D)
+ return false;
+
+ /* On gen6, it's probably not worth swapping to the blit ring to do
+ * this because of all the overhead involved.
+ */
+ if (intel->gen >= 6)
+ return false;
+
+ if (!drm_intel_bo_busy(intelImage->mt->region->bo))
+ return false;
+
+ DBG("BLT subimage %s target %s level %d offset %d,%d %dx%d\n",
+ __FUNCTION__,
+ _mesa_lookup_enum_by_nr(texImage->TexObject->Target),
+ texImage->Level, xoffset, yoffset, width, height);
+
+ pixels = _mesa_validate_pbo_teximage(ctx, 2, width, height, 1,
+ format, type, pixels, packing,
+ "glTexSubImage");
+ if (!pixels)
+ return false;
+
+ temp_bo = drm_intel_bo_alloc_tiled(intel->bufmgr,
+ "subimage blit bo",
+ width, height,
+ intelImage->mt->cpp,
+ &tiling_mode,
+ &pitch,
+ 0);
+ if (temp_bo == NULL) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage");
+ return false;
+ }
+
+ if (drm_intel_gem_bo_map_gtt(temp_bo)) {
+ drm_intel_bo_unreference(temp_bo);
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage");
+ return false;
+ }
+
+ dstMap = temp_bo->virtual;
+ dstRowStride = pitch;
+
+ intel_miptree_get_image_offset(intelImage->mt, texImage->Level,
+ intelImage->base.Base.Face, 0,
+ &blit_x, &blit_y);
+ blit_x += xoffset;
+ blit_y += yoffset;
+
+ if (!_mesa_texstore(ctx, 2, texImage->_BaseFormat,
+ texImage->TexFormat,
+ dstRowStride,
+ &dstMap,
+ width, height, 1,
+ format, type, pixels, packing)) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage");
+ }
+
+ bool ret;
+ unsigned int dst_pitch = intelImage->mt->region->pitch *
+ intelImage->mt->cpp;
+
+ drm_intel_gem_bo_unmap_gtt(temp_bo);
+
+ ret = intelEmitCopyBlit(intel,
+ intelImage->mt->cpp,
+ dstRowStride / intelImage->mt->cpp,
+ temp_bo, 0, false,
+ dst_pitch / intelImage->mt->cpp,
+ intelImage->mt->region->bo, 0,
+ intelImage->mt->region->tiling,
+ 0, 0, blit_x, blit_y, width, height,
+ GL_COPY);
+ assert(ret);
+ (void) ret;
+
+ drm_intel_bo_unreference(temp_bo);
+ _mesa_unmap_teximage_pbo(ctx, packing);
+
+ return true;
+}
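+
+/* In practice this path helps on pre-gen6 hardware when an application
+ * streams glTexSubImage2D() into a texture the GPU is still sampling from:
+ * the region bo is busy, so a direct CPU map would stall, whereas staging
+ * through temp_bo and blitting queues the update behind the GPU instead.
+ */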
+
+static void
+intelTexSubImage(struct gl_context * ctx,
+ GLuint dims,
+ struct gl_texture_image *texImage,
+ GLint xoffset, GLint yoffset, GLint zoffset,
+ GLsizei width, GLsizei height, GLsizei depth,
+ GLenum format, GLenum type,
+ const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing)
+{
+ /* The intel_blit_texsubimage() function only handles 2D images */
+ if (dims != 2 || !intel_blit_texsubimage(ctx, texImage,
+ xoffset, yoffset,
+ width, height,
+ format, type, pixels, packing)) {
+ _mesa_store_texsubimage(ctx, dims, texImage,
+ xoffset, yoffset, zoffset,
+ width, height, depth,
+ format, type, pixels, packing);
+ }
+}
+
+void
+intelInitTextureSubImageFuncs(struct dd_function_table *functions)
+{
+ functions->TexSubImage = intelTexSubImage;
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_validate.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_validate.c
index 41a75674c27..578b4173512 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_validate.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i915/intel_tex_validate.c
@@ -1 +1,218 @@
-../intel/intel_tex_validate.c \ No newline at end of file
+#include "main/mtypes.h"
+#include "main/macros.h"
+#include "main/samplerobj.h"
+#include "main/texobj.h"
+
+#include "intel_context.h"
+#include "intel_mipmap_tree.h"
+#include "intel_blit.h"
+#include "intel_tex.h"
+#include "intel_tex_layout.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+/**
+ * When validating, we only care about the texture images that could
+ * be seen, so for non-mipmapped modes we want to ignore everything
+ * but BaseLevel.
+ */
+static void
+intel_update_max_level(struct intel_texture_object *intelObj,
+ struct gl_sampler_object *sampler)
+{
+ struct gl_texture_object *tObj = &intelObj->base;
+
+ if (sampler->MinFilter == GL_NEAREST ||
+ sampler->MinFilter == GL_LINEAR) {
+ intelObj->_MaxLevel = tObj->BaseLevel;
+ } else {
+ intelObj->_MaxLevel = tObj->_MaxLevel;
+ }
+}
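+
+/* Example: with GL_NEAREST or GL_LINEAR only BaseLevel can ever be sampled,
+ * so _MaxLevel collapses to it and validation pulls in a single level; with
+ * GL_LINEAR_MIPMAP_LINEAR the whole BaseLevel.._MaxLevel range must be
+ * resident in the miptree.
+ */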
+
+/**
+ * Make sure the texture object's miptree can back every level that may be
+ * sampled, pulling loose images into a (possibly new) tree as needed.
+ */
+GLuint
+intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit)
+{
+ struct gl_context *ctx = &intel->ctx;
+ struct gl_texture_object *tObj = intel->ctx.Texture.Unit[unit]._Current;
+ struct intel_texture_object *intelObj = intel_texture_object(tObj);
+ struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
+ GLuint face, i;
+ GLuint nr_faces = 0;
+ struct intel_texture_image *firstImage;
+ int width, height, depth;
+
+ /* TBOs require no validation -- they always just point to their BO. */
+ if (tObj->Target == GL_TEXTURE_BUFFER)
+ return true;
+
+ /* We know/require this is true by now:
+ */
+ assert(intelObj->base._BaseComplete);
+
+ /* What levels must the tree include at a minimum?
+ */
+ intel_update_max_level(intelObj, sampler);
+ firstImage = intel_texture_image(tObj->Image[0][tObj->BaseLevel]);
+
+ /* Check tree can hold all active levels. Check tree matches
+ * target, imageFormat, etc.
+ *
+ * For pre-gen4, we have to match first_level == tObj->BaseLevel,
+ * because we don't have the control that gen4 does to make min/mag
+ * determination happen at a nonzero (hardware) baselevel. Because
+ * of that, we just always relayout on baselevel change.
+ */
+ if (intelObj->mt &&
+ (!intel_miptree_match_image(intelObj->mt, &firstImage->base.Base) ||
+ intelObj->mt->first_level != tObj->BaseLevel ||
+ intelObj->mt->last_level < intelObj->_MaxLevel)) {
+ intel_miptree_release(&intelObj->mt);
+ }
+
+ /* May need to create a new tree:
+ */
+ if (!intelObj->mt) {
+ intel_miptree_get_dimensions_for_image(&firstImage->base.Base,
+ &width, &height, &depth);
+
+ intelObj->mt = intel_miptree_create(intel,
+ intelObj->base.Target,
+ firstImage->base.Base.TexFormat,
+ tObj->BaseLevel,
+ intelObj->_MaxLevel,
+ width,
+ height,
+ depth,
+ true,
+ 0 /* num_samples */,
+ INTEL_MSAA_LAYOUT_NONE);
+ if (!intelObj->mt)
+ return false;
+ }
+
+ /* Pull in any images not in the object's tree:
+ */
+ nr_faces = _mesa_num_tex_faces(intelObj->base.Target);
+ for (face = 0; face < nr_faces; face++) {
+ for (i = tObj->BaseLevel; i <= intelObj->_MaxLevel; i++) {
+ struct intel_texture_image *intelImage =
+ intel_texture_image(intelObj->base.Image[face][i]);
+ /* Stop at the first missing level (the chain ends early for small mips). */
+ if (intelImage == NULL)
+ break;
+
+ if (intelObj->mt != intelImage->mt) {
+ intel_miptree_copy_teximage(intel, intelImage, intelObj->mt);
+ }
+ }
+ }
+
+ return true;
+}
+
+/**
+ * \param mode bitmask of GL_MAP_READ_BIT, GL_MAP_WRITE_BIT
+ */
+static void
+intel_tex_map_image_for_swrast(struct intel_context *intel,
+ struct intel_texture_image *intel_image,
+ GLbitfield mode)
+{
+ int level;
+ int face;
+ struct intel_mipmap_tree *mt;
+ unsigned int x, y;
+
+ if (!intel_image || !intel_image->mt)
+ return;
+
+ level = intel_image->base.Base.Level;
+ face = intel_image->base.Base.Face;
+ mt = intel_image->mt;
+
+ for (int i = 0; i < mt->level[level].depth; i++)
+ intel_miptree_slice_resolve_depth(intel, mt, level, i);
+
+ if (mt->target == GL_TEXTURE_3D ||
+ mt->target == GL_TEXTURE_2D_ARRAY ||
+ mt->target == GL_TEXTURE_1D_ARRAY) {
+ int i;
+
+ /* ImageOffsets[] is only used for swrast's fetch_texel_3d, so we can't
+ * share code with the normal path.
+ */
+ for (i = 0; i < mt->level[level].depth; i++) {
+ intel_miptree_get_image_offset(mt, level, face, i, &x, &y);
+ intel_image->base.ImageOffsets[i] = x + y * mt->region->pitch;
+ }
+
+ DBG("%s \n", __FUNCTION__);
+
+ intel_image->base.Map = intel_region_map(intel, mt->region, mode);
+ } else {
+ assert(intel_image->base.Base.Depth == 1);
+ intel_miptree_get_image_offset(mt, level, face, 0, &x, &y);
+
+ DBG("%s: (%d,%d) -> (%d, %d)/%d\n",
+ __FUNCTION__, face, level, x, y, mt->region->pitch * mt->cpp);
+
+ intel_image->base.Map = intel_region_map(intel, mt->region, mode) +
+ (x + y * mt->region->pitch) * mt->cpp;
+ }
+
+ intel_image->base.RowStride = mt->region->pitch;
+}
+
+static void
+intel_tex_unmap_image_for_swrast(struct intel_context *intel,
+ struct intel_texture_image *intel_image)
+{
+ if (intel_image && intel_image->mt) {
+ intel_region_unmap(intel, intel_image->mt->region);
+ intel_image->base.Map = NULL;
+ }
+}
+
+/**
+ * \param mode bitmask of GL_MAP_READ_BIT, GL_MAP_WRITE_BIT
+ */
+void
+intel_tex_map_images(struct intel_context *intel,
+ struct intel_texture_object *intelObj,
+ GLbitfield mode)
+{
+ GLuint nr_faces = _mesa_num_tex_faces(intelObj->base.Target);
+ int i, face;
+
+ DBG("%s\n", __FUNCTION__);
+
+ for (i = intelObj->base.BaseLevel; i <= intelObj->_MaxLevel; i++) {
+ for (face = 0; face < nr_faces; face++) {
+ struct intel_texture_image *intel_image =
+ intel_texture_image(intelObj->base.Image[face][i]);
+
+ intel_tex_map_image_for_swrast(intel, intel_image, mode);
+ }
+ }
+}
+
+void
+intel_tex_unmap_images(struct intel_context *intel,
+ struct intel_texture_object *intelObj)
+{
+ GLuint nr_faces = _mesa_num_tex_faces(intelObj->base.Target);
+ int i, face;
+
+ for (i = intelObj->base.BaseLevel; i <= intelObj->_MaxLevel; i++) {
+ for (face = 0; face < nr_faces; face++) {
+ struct intel_texture_image *intel_image =
+ intel_texture_image(intelObj->base.Image[face][i]);
+
+ intel_tex_unmap_image_for_swrast(intel, intel_image);
+ }
+ }
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_batchbuffer.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_batchbuffer.c
index d38cdf31cc6..06cbaecc74d 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_batchbuffer.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_batchbuffer.c
@@ -1 +1,543 @@
-../intel/intel_batchbuffer.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "intel_context.h"
+#include "intel_batchbuffer.h"
+#include "intel_buffer_objects.h"
+#include "intel_reg.h"
+#include "intel_bufmgr.h"
+#include "intel_buffers.h"
+
+struct cached_batch_item {
+ struct cached_batch_item *next;
+ uint16_t header;
+ uint16_t size;
+};
+
+static void clear_cache( struct intel_context *intel )
+{
+ struct cached_batch_item *item = intel->batch.cached_items;
+
+ while (item) {
+ struct cached_batch_item *next = item->next;
+ free(item);
+ item = next;
+ }
+
+ intel->batch.cached_items = NULL;
+}
+
+void
+intel_batchbuffer_init(struct intel_context *intel)
+{
+ intel_batchbuffer_reset(intel);
+
+ if (intel->gen >= 6) {
+ /* We can't just use brw_state_batch to get a chunk of space for
+ * the gen6 workaround because it involves actually writing to
+ * the buffer, and the kernel doesn't let us write to the batch.
+ */
+ intel->batch.workaround_bo = drm_intel_bo_alloc(intel->bufmgr,
+ "pipe_control workaround",
+ 4096, 4096);
+ }
+}
+
+void
+intel_batchbuffer_reset(struct intel_context *intel)
+{
+ if (intel->batch.last_bo != NULL) {
+ drm_intel_bo_unreference(intel->batch.last_bo);
+ intel->batch.last_bo = NULL;
+ }
+ intel->batch.last_bo = intel->batch.bo;
+
+ clear_cache(intel);
+
+ intel->batch.bo = drm_intel_bo_alloc(intel->bufmgr, "batchbuffer",
+ intel->maxBatchSize, 4096);
+
+ intel->batch.reserved_space = BATCH_RESERVED;
+ intel->batch.state_batch_offset = intel->batch.bo->size;
+ intel->batch.used = 0;
+ intel->batch.needs_sol_reset = false;
+}
+
+void
+intel_batchbuffer_save_state(struct intel_context *intel)
+{
+ intel->batch.saved.used = intel->batch.used;
+ intel->batch.saved.reloc_count =
+ drm_intel_gem_bo_get_reloc_count(intel->batch.bo);
+}
+
+void
+intel_batchbuffer_reset_to_saved(struct intel_context *intel)
+{
+ drm_intel_gem_bo_clear_relocs(intel->batch.bo, intel->batch.saved.reloc_count);
+
+ intel->batch.used = intel->batch.saved.used;
+
+ /* Cached batch state is dead, since we just cleared some unknown part of the
+ * batchbuffer. Assume that the caller resets any other state necessary.
+ */
+ clear_cache(intel);
+}
+
+void
+intel_batchbuffer_free(struct intel_context *intel)
+{
+ drm_intel_bo_unreference(intel->batch.last_bo);
+ drm_intel_bo_unreference(intel->batch.bo);
+ drm_intel_bo_unreference(intel->batch.workaround_bo);
+ clear_cache(intel);
+}
+
+static void
+do_batch_dump(struct intel_context *intel)
+{
+ struct drm_intel_decode *decode;
+ struct intel_batchbuffer *batch = &intel->batch;
+ int ret;
+
+ decode = drm_intel_decode_context_alloc(intel->intelScreen->deviceID);
+ if (!decode)
+ return;
+
+ ret = drm_intel_bo_map(batch->bo, false);
+ if (ret == 0) {
+ drm_intel_decode_set_batch_pointer(decode,
+ batch->bo->virtual,
+ batch->bo->offset,
+ batch->used);
+ } else {
+ fprintf(stderr,
+ "WARNING: failed to map batchbuffer (%s), "
+ "dumping uploaded data instead.\n", strerror(ret));
+
+ drm_intel_decode_set_batch_pointer(decode,
+ batch->map,
+ batch->bo->offset,
+ batch->used);
+ }
+
+ drm_intel_decode(decode);
+
+ drm_intel_decode_context_free(decode);
+
+ if (ret == 0) {
+ drm_intel_bo_unmap(batch->bo);
+
+ if (intel->vtbl.debug_batch != NULL)
+ intel->vtbl.debug_batch(intel);
+ }
+}
+
+/* TODO: Push this whole function into bufmgr.
+ */
+static int
+do_flush_locked(struct intel_context *intel)
+{
+ struct intel_batchbuffer *batch = &intel->batch;
+ int ret = 0;
+
+ ret = drm_intel_bo_subdata(batch->bo, 0, 4*batch->used, batch->map);
+ if (ret == 0 && batch->state_batch_offset != batch->bo->size) {
+ ret = drm_intel_bo_subdata(batch->bo,
+ batch->state_batch_offset,
+ batch->bo->size - batch->state_batch_offset,
+ (char *)batch->map + batch->state_batch_offset);
+ }
+
+ if (!intel->intelScreen->no_hw) {
+ int flags;
+
+ if (intel->gen < 6 || !batch->is_blit) {
+ flags = I915_EXEC_RENDER;
+ } else {
+ flags = I915_EXEC_BLT;
+ }
+
+ if (batch->needs_sol_reset)
+ flags |= I915_EXEC_GEN7_SOL_RESET;
+
+ if (ret == 0) {
+ if (unlikely(INTEL_DEBUG & DEBUG_AUB) && intel->vtbl.annotate_aub)
+ intel->vtbl.annotate_aub(intel);
+ if (intel->hw_ctx == NULL || batch->is_blit) {
+ ret = drm_intel_bo_mrb_exec(batch->bo, 4 * batch->used, NULL, 0, 0,
+ flags);
+ } else {
+ ret = drm_intel_gem_bo_context_exec(batch->bo, intel->hw_ctx,
+ 4 * batch->used, flags);
+ }
+ }
+ }
+
+ if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
+ do_batch_dump(intel);
+
+ if (ret != 0) {
+ fprintf(stderr, "intel_do_flush_locked failed: %s\n", strerror(-ret));
+ exit(1);
+ }
+ intel->vtbl.new_batch(intel);
+
+ return ret;
+}
+
+int
+_intel_batchbuffer_flush(struct intel_context *intel,
+ const char *file, int line)
+{
+ int ret;
+
+ if (intel->batch.used == 0)
+ return 0;
+
+ if (intel->first_post_swapbuffers_batch == NULL) {
+ intel->first_post_swapbuffers_batch = intel->batch.bo;
+ drm_intel_bo_reference(intel->first_post_swapbuffers_batch);
+ }
+
+ if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
+ fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
+ 4*intel->batch.used);
+
+ intel->batch.reserved_space = 0;
+
+ if (intel->vtbl.finish_batch)
+ intel->vtbl.finish_batch(intel);
+
+ /* Mark the end of the buffer. */
+ intel_batchbuffer_emit_dword(intel, MI_BATCH_BUFFER_END);
+ if (intel->batch.used & 1) {
+ /* Round batchbuffer usage to 2 DWORDs. */
+ intel_batchbuffer_emit_dword(intel, MI_NOOP);
+ }
+
+ intel_upload_finish(intel);
+
+ /* Check that we didn't just wrap our batchbuffer at a bad time. */
+ assert(!intel->no_batch_wrap);
+
+ ret = do_flush_locked(intel);
+
+ if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
+ fprintf(stderr, "waiting for idle\n");
+ drm_intel_bo_wait_rendering(intel->batch.bo);
+ }
+
+ /* Reset the buffer:
+ */
+ intel_batchbuffer_reset(intel);
+
+ return ret;
+}
+
+
+/* This is the only way buffers get added to the validate list.
+ */
+bool
+intel_batchbuffer_emit_reloc(struct intel_context *intel,
+ drm_intel_bo *buffer,
+ uint32_t read_domains, uint32_t write_domain,
+ uint32_t delta)
+{
+ int ret;
+
+ ret = drm_intel_bo_emit_reloc(intel->batch.bo, 4*intel->batch.used,
+ buffer, delta,
+ read_domains, write_domain);
+ assert(ret == 0);
+ (void)ret;
+
+ /*
+ * Using the old buffer offset, write in what the right data would be, in case
+ * the buffer doesn't move and we can short-circuit the relocation processing
+ * in the kernel.
+ */
+ intel_batchbuffer_emit_dword(intel, buffer->offset + delta);
+
+ return true;
+}
+
+bool
+intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
+ drm_intel_bo *buffer,
+ uint32_t read_domains,
+ uint32_t write_domain,
+ uint32_t delta)
+{
+ int ret;
+
+ ret = drm_intel_bo_emit_reloc_fence(intel->batch.bo, 4*intel->batch.used,
+ buffer, delta,
+ read_domains, write_domain);
+ assert(ret == 0);
+ (void)ret;
+
+ /*
+ * Using the old buffer offset, write in what the right data would
+ * be, in case the buffer doesn't move and we can short-circuit the
+ * relocation processing in the kernel
+ */
+ intel_batchbuffer_emit_dword(intel, buffer->offset + delta);
+
+ return true;
+}
+
+void
+intel_batchbuffer_data(struct intel_context *intel,
+ const void *data, GLuint bytes, bool is_blit)
+{
+ assert((bytes & 3) == 0);
+ intel_batchbuffer_require_space(intel, bytes, is_blit);
+ __memcpy(intel->batch.map + intel->batch.used, data, bytes);
+ intel->batch.used += bytes >> 2;
+}
+
+void
+intel_batchbuffer_cached_advance(struct intel_context *intel)
+{
+ struct cached_batch_item **prev = &intel->batch.cached_items, *item;
+ uint32_t sz = (intel->batch.used - intel->batch.emit) * sizeof(uint32_t);
+ uint32_t *start = intel->batch.map + intel->batch.emit;
+ uint16_t op = *start >> 16;
+
+ while (*prev) {
+ uint32_t *old;
+
+ item = *prev;
+ old = intel->batch.map + item->header;
+ if (op == *old >> 16) {
+ if (item->size == sz && memcmp(old, start, sz) == 0) {
+ if (prev != &intel->batch.cached_items) {
+ *prev = item->next;
+ item->next = intel->batch.cached_items;
+ intel->batch.cached_items = item;
+ }
+ intel->batch.used = intel->batch.emit;
+ return;
+ }
+
+ goto emit;
+ }
+ prev = &item->next;
+ }
+
+ item = malloc(sizeof(struct cached_batch_item));
+ if (item == NULL)
+ return;
+
+ item->next = intel->batch.cached_items;
+ intel->batch.cached_items = item;
+
+emit:
+ item->size = sz;
+ item->header = intel->batch.emit;
+}
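+
+/* The cache above keys packets on the opcode in the high 16 bits of the
+ * header dword. On a hit with identical size and payload, batch->used is
+ * rewound to batch->emit, dropping the duplicate packet, and the item is
+ * moved to the head of the list so it stays ordered most-recently-used.
+ */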
+
+/**
+ * Restriction [DevSNB, DevIVB]:
+ *
+ * Prior to changing Depth/Stencil Buffer state (i.e. any combination of
+ * 3DSTATE_DEPTH_BUFFER, 3DSTATE_CLEAR_PARAMS, 3DSTATE_STENCIL_BUFFER,
+ * 3DSTATE_HIER_DEPTH_BUFFER) SW must first issue a pipelined depth stall
+ * (PIPE_CONTROL with Depth Stall bit set), followed by a pipelined depth
+ * cache flush (PIPE_CONTROL with Depth Flush Bit set), followed by
+ * another pipelined depth stall (PIPE_CONTROL with Depth Stall bit set),
+ * unless SW can otherwise guarantee that the pipeline from WM onwards is
+ * already flushed (e.g., via a preceding MI_FLUSH).
+ */
+void
+intel_emit_depth_stall_flushes(struct intel_context *intel)
+{
+ assert(intel->gen >= 6 && intel->gen <= 7);
+
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
+ OUT_BATCH(PIPE_CONTROL_DEPTH_STALL);
+ OUT_BATCH(0); /* address */
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
+ OUT_BATCH(PIPE_CONTROL_DEPTH_CACHE_FLUSH);
+ OUT_BATCH(0); /* address */
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
+ OUT_BATCH(PIPE_CONTROL_DEPTH_STALL);
+ OUT_BATCH(0); /* address */
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+}
+
+/**
+ * From the BSpec, volume 2a.03: VS Stage Input / State:
+ * "[DevIVB] A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
+ * stall needs to be sent just prior to any 3DSTATE_VS, 3DSTATE_URB_VS,
+ * 3DSTATE_CONSTANT_VS, 3DSTATE_BINDING_TABLE_POINTER_VS,
+ * 3DSTATE_SAMPLER_STATE_POINTER_VS command. Only one PIPE_CONTROL needs
+ * to be sent before any combination of VS associated 3DSTATE."
+ */
+void
+gen7_emit_vs_workaround_flush(struct intel_context *intel)
+{
+ assert(intel->gen == 7);
+
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
+ OUT_BATCH(PIPE_CONTROL_DEPTH_STALL | PIPE_CONTROL_WRITE_IMMEDIATE);
+ OUT_RELOC(intel->batch.workaround_bo,
+ I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+}
+
+/**
+ * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
+ * implementing two workarounds on gen6. From section 1.4.7.1
+ * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
+ *
+ * [DevSNB-C+{W/A}] Before any depth stall flush (including those
+ * produced by non-pipelined state commands), software needs to first
+ * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
+ * 0.
+ *
+ * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
+ * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
+ *
+ * And the workaround for these two requires this workaround first:
+ *
+ * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
+ * BEFORE the pipe-control with a post-sync op and no write-cache
+ * flushes.
+ *
+ * And this last workaround is tricky because of the requirements on
+ * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
+ * volume 2 part 1:
+ *
+ * "1 of the following must also be set:
+ * - Render Target Cache Flush Enable ([12] of DW1)
+ * - Depth Cache Flush Enable ([0] of DW1)
+ * - Stall at Pixel Scoreboard ([1] of DW1)
+ * - Depth Stall ([13] of DW1)
+ * - Post-Sync Operation ([13] of DW1)
+ * - Notify Enable ([8] of DW1)"
+ *
+ * The cache flushes require the workaround flush that triggered this
+ * one, so we can't use it. Depth stall would trigger the same.
+ * Post-sync nonzero is what triggered this second workaround, so we
+ * can't use that one either. Notify enable is IRQs, which aren't
+ * really our business. That leaves only stall at scoreboard.
+ */
+void
+intel_emit_post_sync_nonzero_flush(struct intel_context *intel)
+{
+ if (!intel->batch.need_workaround_flush)
+ return;
+
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
+ OUT_BATCH(PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD);
+ OUT_BATCH(0); /* address */
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
+ OUT_BATCH(PIPE_CONTROL_WRITE_IMMEDIATE);
+ OUT_RELOC(intel->batch.workaround_bo,
+ I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+
+ intel->batch.need_workaround_flush = false;
+}
+
+/* Emit a pipelined flush to either flush render and texture cache for
+ * reading from a FBO-drawn texture, or flush so that frontbuffer
+ * rendering appears on the screen in DRI1.
+ *
+ * This is also used for the always_flush_cache driconf debug option.
+ */
+void
+intel_batchbuffer_emit_mi_flush(struct intel_context *intel)
+{
+ if (intel->gen >= 6) {
+ if (intel->batch.is_blit) {
+ BEGIN_BATCH_BLT(4);
+ OUT_BATCH(MI_FLUSH_DW);
+ OUT_BATCH(0);
+ OUT_BATCH(0);
+ OUT_BATCH(0);
+ ADVANCE_BATCH();
+ } else {
+ if (intel->gen == 6) {
+ /* Hardware workaround: SNB B-Spec says:
+ *
+ * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache
+ * Flush Enable =1, a PIPE_CONTROL with any non-zero
+ * post-sync-op is required.
+ */
+ intel_emit_post_sync_nonzero_flush(intel);
+ }
+
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
+ OUT_BATCH(PIPE_CONTROL_INSTRUCTION_FLUSH |
+ PIPE_CONTROL_WRITE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_VF_CACHE_INVALIDATE |
+ PIPE_CONTROL_TC_FLUSH |
+ PIPE_CONTROL_NO_WRITE |
+ PIPE_CONTROL_CS_STALL);
+ OUT_BATCH(0); /* write address */
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+ }
+ } else if (intel->gen >= 4) {
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2) |
+ PIPE_CONTROL_WRITE_FLUSH |
+ PIPE_CONTROL_NO_WRITE);
+ OUT_BATCH(0); /* write address */
+ OUT_BATCH(0); /* write data */
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+ } else {
+ BEGIN_BATCH(1);
+ OUT_BATCH(MI_FLUSH);
+ ADVANCE_BATCH();
+ }
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_blit.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_blit.c
index dd6c8d17c28..36a2c6aadac 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_blit.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_blit.c
@@ -1 +1,604 @@
-../intel/intel_blit.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "main/mtypes.h"
+#include "main/context.h"
+#include "main/enums.h"
+#include "main/colormac.h"
+#include "main/fbobject.h"
+
+#include "intel_blit.h"
+#include "intel_buffers.h"
+#include "intel_context.h"
+#include "intel_fbo.h"
+#include "intel_reg.h"
+#include "intel_regions.h"
+#include "intel_batchbuffer.h"
+#include "intel_mipmap_tree.h"
+
+#define FILE_DEBUG_FLAG DEBUG_BLIT
+
+static GLuint translate_raster_op(GLenum logicop)
+{
+ switch(logicop) {
+ case GL_CLEAR: return 0x00;
+ case GL_AND: return 0x88;
+ case GL_AND_REVERSE: return 0x44;
+ case GL_COPY: return 0xCC;
+ case GL_AND_INVERTED: return 0x22;
+ case GL_NOOP: return 0xAA;
+ case GL_XOR: return 0x66;
+ case GL_OR: return 0xEE;
+ case GL_NOR: return 0x11;
+ case GL_EQUIV: return 0x99;
+ case GL_INVERT: return 0x55;
+ case GL_OR_REVERSE: return 0xDD;
+ case GL_COPY_INVERTED: return 0x33;
+ case GL_OR_INVERTED: return 0xBB;
+ case GL_NAND: return 0x77;
+ case GL_SET: return 0xFF;
+ default: return 0;
+ }
+}
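+
+/* The 8-bit raster-op values above are the truth table of the operation
+ * applied to the source (0xCC) and destination (0xAA) bit patterns; for
+ * example, GL_XOR maps to 0xCC ^ 0xAA = 0x66, and GL_NOOP to the plain
+ * destination, 0xAA.
+ */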
+
+static uint32_t
+br13_for_cpp(int cpp)
+{
+ switch (cpp) {
+ case 4:
+ return BR13_8888;
+ case 2:
+ return BR13_565;
+ case 1:
+ return BR13_8;
+ default:
+ assert(0);
+ return 0;
+ }
+}
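+
+/* The values above fill the color-depth field of the blitter's BR13 dword;
+ * callers OR in the raster op (bits 16-23) and the destination pitch (low
+ * 16 bits).
+ */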
+
+/* Copy BitBlt
+ */
+bool
+intelEmitCopyBlit(struct intel_context *intel,
+ GLuint cpp,
+ GLshort src_pitch,
+ drm_intel_bo *src_buffer,
+ GLuint src_offset,
+ uint32_t src_tiling,
+ GLshort dst_pitch,
+ drm_intel_bo *dst_buffer,
+ GLuint dst_offset,
+ uint32_t dst_tiling,
+ GLshort src_x, GLshort src_y,
+ GLshort dst_x, GLshort dst_y,
+ GLshort w, GLshort h,
+ GLenum logic_op)
+{
+ GLuint CMD, BR13, pass = 0;
+ int dst_y2 = dst_y + h;
+ int dst_x2 = dst_x + w;
+ drm_intel_bo *aper_array[3];
+ BATCH_LOCALS;
+
+ if (dst_tiling != I915_TILING_NONE) {
+ if (dst_offset & 4095)
+ return false;
+ if (dst_tiling == I915_TILING_Y)
+ return false;
+ }
+ if (src_tiling != I915_TILING_NONE) {
+ if (src_offset & 4095)
+ return false;
+ if (src_tiling == I915_TILING_Y)
+ return false;
+ }
+
+ /* do space check before going any further */
+ do {
+ aper_array[0] = intel->batch.bo;
+ aper_array[1] = dst_buffer;
+ aper_array[2] = src_buffer;
+
+ if (dri_bufmgr_check_aperture_space(aper_array, 3) != 0) {
+ intel_batchbuffer_flush(intel);
+ pass++;
+ } else
+ break;
+ } while (pass < 2);
+
+ if (pass >= 2)
+ return false;
+
+ intel_batchbuffer_require_space(intel, 8 * 4, true);
+ DBG("%s src:buf(%p)/%d+%d %d,%d dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
+ __FUNCTION__,
+ src_buffer, src_pitch, src_offset, src_x, src_y,
+ dst_buffer, dst_pitch, dst_offset, dst_x, dst_y, w, h);
+
+ src_pitch *= cpp;
+ dst_pitch *= cpp;
+
+ /* Blit pitch must be dword-aligned. Otherwise, the hardware appears to drop
+ * the low bits.
+ */
+ assert(src_pitch % 4 == 0);
+ assert(dst_pitch % 4 == 0);
+
+ /* For big formats (such as floating point), do the copy using 32bpp and
+ * multiply the coordinates.
+ */
+ if (cpp > 4) {
+ assert(cpp % 4 == 0);
+ dst_x *= cpp / 4;
+ dst_x2 *= cpp / 4;
+ src_x *= cpp / 4;
+ cpp = 4;
+ }
+
+ BR13 = br13_for_cpp(cpp) | translate_raster_op(logic_op) << 16;
+
+ switch (cpp) {
+ case 1:
+ case 2:
+ CMD = XY_SRC_COPY_BLT_CMD;
+ break;
+ case 4:
+ CMD = XY_SRC_COPY_BLT_CMD | XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
+ break;
+ default:
+ return false;
+ }
+
+#ifndef I915
+ if (dst_tiling != I915_TILING_NONE) {
+ CMD |= XY_DST_TILED;
+ dst_pitch /= 4;
+ }
+ if (src_tiling != I915_TILING_NONE) {
+ CMD |= XY_SRC_TILED;
+ src_pitch /= 4;
+ }
+#endif
+
+ if (dst_y2 <= dst_y || dst_x2 <= dst_x) {
+ return true;
+ }
+
+ assert(dst_x < dst_x2);
+ assert(dst_y < dst_y2);
+
+ BEGIN_BATCH_BLT(8);
+ OUT_BATCH(CMD);
+ OUT_BATCH(BR13 | (uint16_t)dst_pitch);
+ OUT_BATCH((dst_y << 16) | dst_x);
+ OUT_BATCH((dst_y2 << 16) | dst_x2);
+ OUT_RELOC_FENCED(dst_buffer,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
+ dst_offset);
+ OUT_BATCH((src_y << 16) | src_x);
+ OUT_BATCH((uint16_t)src_pitch);
+ OUT_RELOC_FENCED(src_buffer,
+ I915_GEM_DOMAIN_RENDER, 0,
+ src_offset);
+ ADVANCE_BATCH();
+
+ intel_batchbuffer_emit_mi_flush(intel);
+
+ return true;
+}
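+
+/* In the XY blit packet above, the destination rectangle is encoded as an
+ * inclusive top-left dword (y1 << 16 | x1) and an exclusive bottom-right
+ * dword (y2 << 16 | x2), with pitches in bytes; for tiled surfaces the
+ * hardware expects the pitch in dwords, hence the pitch /= 4 under
+ * XY_DST_TILED / XY_SRC_TILED.
+ */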
+
+
+/**
+ * Use blitting to clear the renderbuffers named by 'flags'.
+ * Note: we can't use the ctx->DrawBuffer->_ColorDrawBufferIndexes field
+ * since that might include software renderbuffers or renderbuffers
+ * which we're clearing with triangles.
+ * \param mask bitmask of BUFFER_BIT_* values indicating buffers to clear
+ */
+GLbitfield
+intelClearWithBlit(struct gl_context *ctx, GLbitfield mask)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
+ GLuint clear_depth_value, clear_depth_mask;
+ GLint cx, cy, cw, ch;
+ GLbitfield fail_mask = 0;
+ BATCH_LOCALS;
+
+ /*
+ * Compute values for clearing the buffers.
+ */
+ clear_depth_value = 0;
+ clear_depth_mask = 0;
+ if (mask & BUFFER_BIT_DEPTH) {
+ clear_depth_value = (GLuint) (fb->_DepthMax * ctx->Depth.Clear);
+ clear_depth_mask = XY_BLT_WRITE_RGB;
+ }
+ if (mask & BUFFER_BIT_STENCIL) {
+ clear_depth_value |= (ctx->Stencil.Clear & 0xff) << 24;
+ clear_depth_mask |= XY_BLT_WRITE_ALPHA;
+ }
+
+ cx = fb->_Xmin;
+ if (_mesa_is_winsys_fbo(fb))
+ cy = ctx->DrawBuffer->Height - fb->_Ymax;
+ else
+ cy = fb->_Ymin;
+ cw = fb->_Xmax - fb->_Xmin;
+ ch = fb->_Ymax - fb->_Ymin;
+
+ if (cw == 0 || ch == 0)
+ return 0;
+
+ /* Loop over all renderbuffers */
+ mask &= (1 << BUFFER_COUNT) - 1;
+ while (mask) {
+ GLuint buf = ffs(mask) - 1;
+ bool is_depth_stencil = buf == BUFFER_DEPTH || buf == BUFFER_STENCIL;
+ struct intel_renderbuffer *irb;
+ int x1, y1, x2, y2;
+ uint32_t clear_val;
+ uint32_t BR13, CMD;
+ struct intel_region *region;
+ int pitch, cpp;
+ drm_intel_bo *aper_array[2];
+
+ mask &= ~(1 << buf);
+
+ irb = intel_get_renderbuffer(fb, buf);
+ if (irb && irb->mt) {
+ region = irb->mt->region;
+ assert(region);
+ assert(region->bo);
+ } else {
+ fail_mask |= 1 << buf;
+ continue;
+ }
+
+ /* OK, clear this renderbuffer */
+ x1 = cx + irb->draw_x;
+ y1 = cy + irb->draw_y;
+ x2 = cx + cw + irb->draw_x;
+ y2 = cy + ch + irb->draw_y;
+
+ pitch = region->pitch;
+ cpp = region->cpp;
+
+ DBG("%s dst:buf(%p)/%d %d,%d sz:%dx%d\n",
+ __FUNCTION__,
+ region->bo, (pitch * cpp),
+ x1, y1, x2 - x1, y2 - y1);
+
+ BR13 = 0xf0 << 16;
+ CMD = XY_COLOR_BLT_CMD;
+
+ /* Setup the blit command */
+ if (cpp == 4) {
+ if (is_depth_stencil) {
+ CMD |= clear_depth_mask;
+ } else {
+ /* clearing RGBA */
+ CMD |= XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
+ }
+ }
+
+ assert(region->tiling != I915_TILING_Y);
+
+#ifndef I915
+ if (region->tiling != I915_TILING_NONE) {
+ CMD |= XY_DST_TILED;
+ pitch /= 4;
+ }
+#endif
+ BR13 |= (pitch * cpp);
+
+ if (is_depth_stencil) {
+ clear_val = clear_depth_value;
+ } else {
+ uint8_t clear[4];
+ GLfloat *color = ctx->Color.ClearColor.f;
+
+ _mesa_unclamped_float_rgba_to_ubyte(clear, color);
+
+ switch (intel_rb_format(irb)) {
+ case MESA_FORMAT_ARGB8888:
+ case MESA_FORMAT_XRGB8888:
+ clear_val = PACK_COLOR_8888(clear[3], clear[0],
+ clear[1], clear[2]);
+ break;
+ case MESA_FORMAT_RGB565:
+ clear_val = PACK_COLOR_565(clear[0], clear[1], clear[2]);
+ break;
+ case MESA_FORMAT_ARGB4444:
+ clear_val = PACK_COLOR_4444(clear[3], clear[0],
+ clear[1], clear[2]);
+ break;
+ case MESA_FORMAT_ARGB1555:
+ clear_val = PACK_COLOR_1555(clear[3], clear[0],
+ clear[1], clear[2]);
+ break;
+ case MESA_FORMAT_A8:
+ clear_val = PACK_COLOR_8888(clear[3], clear[3],
+ clear[3], clear[3]);
+ break;
+ default:
+ fail_mask |= 1 << buf;
+ continue;
+ }
+ }
+
+ BR13 |= br13_for_cpp(cpp);
+
+ assert(x1 < x2);
+ assert(y1 < y2);
+
+ /* do space check before going any further */
+ aper_array[0] = intel->batch.bo;
+ aper_array[1] = region->bo;
+
+ if (drm_intel_bufmgr_check_aperture_space(aper_array,
+ ARRAY_SIZE(aper_array)) != 0) {
+ intel_batchbuffer_flush(intel);
+ }
+
+ BEGIN_BATCH_BLT(6);
+ OUT_BATCH(CMD);
+ OUT_BATCH(BR13);
+ OUT_BATCH((y1 << 16) | x1);
+ OUT_BATCH((y2 << 16) | x2);
+ OUT_RELOC_FENCED(region->bo,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
+ 0);
+ OUT_BATCH(clear_val);
+ ADVANCE_BATCH();
+
+ if (intel->always_flush_cache)
+ intel_batchbuffer_emit_mi_flush(intel);
+
+ if (buf == BUFFER_DEPTH || buf == BUFFER_STENCIL)
+ mask &= ~(BUFFER_BIT_DEPTH | BUFFER_BIT_STENCIL);
+ }
+
+ return fail_mask;
+}
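+
+/* Notes on the clear path: XY_COLOR_BLT fills with ROP 0xf0 (pattern copy,
+ * the solid clear_val acting as the pattern). Depth and stencil share one
+ * blit on packed 24/8 layouts, depth going through the RGB write mask and
+ * stencil through the alpha write mask, and any buffer the blitter cannot
+ * handle is reported back in fail_mask so the caller can clear it some
+ * other way.
+ */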
+
+bool
+intelEmitImmediateColorExpandBlit(struct intel_context *intel,
+ GLuint cpp,
+ GLubyte *src_bits, GLuint src_size,
+ GLuint fg_color,
+ GLshort dst_pitch,
+ drm_intel_bo *dst_buffer,
+ GLuint dst_offset,
+ uint32_t dst_tiling,
+ GLshort x, GLshort y,
+ GLshort w, GLshort h,
+ GLenum logic_op)
+{
+ int dwords = ALIGN(src_size, 8) / 4;
+ uint32_t opcode, br13, blit_cmd;
+
+ if (dst_tiling != I915_TILING_NONE) {
+ if (dst_offset & 4095)
+ return false;
+ if (dst_tiling == I915_TILING_Y)
+ return false;
+ }
+
+ assert(logic_op - GL_CLEAR >= 0);
+ assert(logic_op - GL_CLEAR < 0x10);
+ assert(dst_pitch > 0);
+
+ if (w < 0 || h < 0)
+ return true;
+
+ dst_pitch *= cpp;
+
+ DBG("%s dst:buf(%p)/%d+%d %d,%d sz:%dx%d, %d bytes %d dwords\n",
+ __FUNCTION__,
+ dst_buffer, dst_pitch, dst_offset, x, y, w, h, src_size, dwords);
+
+ intel_batchbuffer_require_space(intel,
+ (8 * 4) +
+ (3 * 4) +
+ dwords * 4, true);
+
+ opcode = XY_SETUP_BLT_CMD;
+ if (cpp == 4)
+ opcode |= XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
+#ifndef I915
+ if (dst_tiling != I915_TILING_NONE) {
+ opcode |= XY_DST_TILED;
+ dst_pitch /= 4;
+ }
+#endif
+
+ br13 = dst_pitch | (translate_raster_op(logic_op) << 16) | (1 << 29);
+ br13 |= br13_for_cpp(cpp);
+
+ blit_cmd = XY_TEXT_IMMEDIATE_BLIT_CMD | XY_TEXT_BYTE_PACKED; /* packing? */
+ if (dst_tiling != I915_TILING_NONE)
+ blit_cmd |= XY_DST_TILED;
+
+ BEGIN_BATCH_BLT(8 + 3);
+ OUT_BATCH(opcode);
+ OUT_BATCH(br13);
+ OUT_BATCH((0 << 16) | 0); /* clip x1, y1 */
+ OUT_BATCH((100 << 16) | 100); /* clip x2, y2 */
+ OUT_RELOC_FENCED(dst_buffer,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
+ dst_offset);
+ OUT_BATCH(0); /* bg */
+ OUT_BATCH(fg_color); /* fg */
+ OUT_BATCH(0); /* pattern base addr */
+
+ OUT_BATCH(blit_cmd | ((3 - 2) + dwords));
+ OUT_BATCH((y << 16) | x);
+ OUT_BATCH(((y + h) << 16) | (x + w));
+ ADVANCE_BATCH();
+
+ intel_batchbuffer_data(intel, src_bits, dwords * 4, true);
+
+ intel_batchbuffer_emit_mi_flush(intel);
+
+ return true;
+}
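+
+/* The immediate color-expand blit streams src_bits inline in the batch:
+ * XY_SETUP_BLT programs the clip rectangle, colors and raster op, then
+ * each set bit of the packed monochrome source expands to fg_color
+ * through that op. The (1 << 29) bit in br13 presumably selects
+ * mono-source transparency (an assumption), which would make clear bits
+ * leave the destination untouched rather than write the bg value.
+ */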
+
+/* We don't have a memmove-type blit like some other hardware, so we'll do a
+ * rectangular blit covering a large space, then emit a 1-scanline blit at
+ * the end to cover the remainder if needed.
+ */
+void
+intel_emit_linear_blit(struct intel_context *intel,
+ drm_intel_bo *dst_bo,
+ unsigned int dst_offset,
+ drm_intel_bo *src_bo,
+ unsigned int src_offset,
+ unsigned int size)
+{
+ GLuint pitch, height;
+ bool ok;
+
+ /* The pitch given to the GPU must be DWORD aligned, and
+ * we want width to match pitch. Max width is (1 << 15) - 1,
+ * and rounding that down to the nearest DWORD gives (1 << 15) - 4.
+ */
+ pitch = ROUND_DOWN_TO(MIN2(size, (1 << 15) - 1), 4);
+ height = (pitch == 0) ? 1 : size / pitch;
+ ok = intelEmitCopyBlit(intel, 1,
+ pitch, src_bo, src_offset, I915_TILING_NONE,
+ pitch, dst_bo, dst_offset, I915_TILING_NONE,
+ 0, 0, /* src x/y */
+ 0, 0, /* dst x/y */
+ pitch, height, /* w, h */
+ GL_COPY);
+ assert(ok);
+
+ src_offset += pitch * height;
+ dst_offset += pitch * height;
+ size -= pitch * height;
+ assert (size < (1 << 15));
+ pitch = ALIGN(size, 4);
+ if (size != 0) {
+ ok = intelEmitCopyBlit(intel, 1,
+ pitch, src_bo, src_offset, I915_TILING_NONE,
+ pitch, dst_bo, dst_offset, I915_TILING_NONE,
+ 0, 0, /* src x/y */
+ 0, 0, /* dst x/y */
+ size, 1, /* w, h */
+ GL_COPY);
+ assert(ok);
+ }
+}
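+
+/* A worked example: for size = 100000 bytes, pitch = ROUND_DOWN_TO(
+ * MIN2(100000, 32767), 4) = 32764 and height = 3, so the rectangular
+ * blit covers 98292 bytes; the remaining 1708 bytes are copied by the
+ * final 1708x1 single-scanline blit.
+ */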
+
+/**
+ * Used to initialize the alpha value of an ARGB8888 teximage after
+ * loading it from an XRGB8888 source.
+ *
+ * This is very common with glCopyTexImage2D().
+ */
+void
+intel_set_teximage_alpha_to_one(struct gl_context *ctx,
+ struct intel_texture_image *intel_image)
+{
+ struct intel_context *intel = intel_context(ctx);
+ unsigned int image_x, image_y;
+ uint32_t x1, y1, x2, y2;
+ uint32_t BR13, CMD;
+ int pitch, cpp;
+ drm_intel_bo *aper_array[2];
+ struct intel_region *region = intel_image->mt->region;
+ int width, height, depth;
+ BATCH_LOCALS;
+
+ intel_miptree_get_dimensions_for_image(&intel_image->base.Base,
+ &width, &height, &depth);
+ assert(depth == 1);
+
+ assert(intel_image->base.Base.TexFormat == MESA_FORMAT_ARGB8888);
+
+ /* get dest x/y in destination texture */
+ intel_miptree_get_image_offset(intel_image->mt,
+ intel_image->base.Base.Level,
+ intel_image->base.Base.Face,
+ 0,
+ &image_x, &image_y);
+
+ x1 = image_x;
+ y1 = image_y;
+ x2 = image_x + width;
+ y2 = image_y + height;
+
+ pitch = region->pitch;
+ cpp = region->cpp;
+
+ DBG("%s dst:buf(%p)/%d %d,%d sz:%dx%d\n",
+ __FUNCTION__,
+ intel_image->mt->region->bo, (pitch * cpp),
+ x1, y1, x2 - x1, y2 - y1);
+
+ BR13 = br13_for_cpp(cpp) | 0xf0 << 16;
+ CMD = XY_COLOR_BLT_CMD;
+ CMD |= XY_BLT_WRITE_ALPHA;
+
+ assert(region->tiling != I915_TILING_Y);
+
+#ifndef I915
+ if (region->tiling != I915_TILING_NONE) {
+ CMD |= XY_DST_TILED;
+ pitch /= 4;
+ }
+#endif
+ BR13 |= (pitch * cpp);
+
+ /* do space check before going any further */
+ aper_array[0] = intel->batch.bo;
+ aper_array[1] = region->bo;
+
+ if (drm_intel_bufmgr_check_aperture_space(aper_array,
+ ARRAY_SIZE(aper_array)) != 0) {
+ intel_batchbuffer_flush(intel);
+ }
+
+ BEGIN_BATCH_BLT(6);
+ OUT_BATCH(CMD);
+ OUT_BATCH(BR13);
+ OUT_BATCH((y1 << 16) | x1);
+ OUT_BATCH((y2 << 16) | x2);
+ OUT_RELOC_FENCED(region->bo,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
+ 0);
+ OUT_BATCH(0xffffffff); /* white, but only alpha gets written */
+ ADVANCE_BATCH();
+
+ intel_batchbuffer_emit_mi_flush(intel);
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_buffer_objects.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_buffer_objects.c
index e06dd3c8d3c..df8ac7fb301 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_buffer_objects.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_buffer_objects.c
@@ -1 +1,849 @@
-../intel/intel_buffer_objects.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "main/imports.h"
+#include "main/mfeatures.h"
+#include "main/mtypes.h"
+#include "main/macros.h"
+#include "main/bufferobj.h"
+
+#include "intel_blit.h"
+#include "intel_buffer_objects.h"
+#include "intel_batchbuffer.h"
+#include "intel_context.h"
+#include "intel_fbo.h"
+#include "intel_mipmap_tree.h"
+#include "intel_regions.h"
+
+static GLboolean
+intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj);
+
+/** Allocates a new drm_intel_bo to store the data for the buffer object. */
+static void
+intel_bufferobj_alloc_buffer(struct intel_context *intel,
+ struct intel_buffer_object *intel_obj)
+{
+ intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
+ intel_obj->Base.Size, 64);
+}
+
+static void
+release_buffer(struct intel_buffer_object *intel_obj)
+{
+ drm_intel_bo_unreference(intel_obj->buffer);
+ intel_obj->buffer = NULL;
+ intel_obj->offset = 0;
+ intel_obj->source = 0;
+}
+
+/**
+ * There is some duplication between Mesa's bufferobjects and our
+ * bufmgr buffers. Both have an integer handle and a hashtable to
+ * look up an opaque structure. It would be nice if the handles and
+ * internal structure were somehow shared.
+ */
+static struct gl_buffer_object *
+intel_bufferobj_alloc(struct gl_context * ctx, GLuint name, GLenum target)
+{
+ struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);
+
+ _mesa_initialize_buffer_object(ctx, &obj->Base, name, target);
+
+ obj->buffer = NULL;
+
+ return &obj->Base;
+}
+
+/**
+ * Deallocate/free a vertex/pixel buffer object.
+ * Called via glDeleteBuffersARB().
+ */
+static void
+intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
+{
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ assert(intel_obj);
+
+ /* Buffer objects are automatically unmapped when deleted according
+ * to the spec, but Mesa doesn't call UnmapBuffer for us at context
+ * destroy (though it does if you call glDeleteBuffers).
+ */
+ if (obj->Pointer)
+ intel_bufferobj_unmap(ctx, obj);
+
+ free(intel_obj->sys_buffer);
+
+ drm_intel_bo_unreference(intel_obj->buffer);
+ free(intel_obj);
+}
+
+
+
+/**
+ * Allocate space for and store data in a buffer object. Any data that was
+ * previously stored in the buffer object is lost. If data is NULL,
+ * memory will be allocated, but no copy will occur.
+ * Called via ctx->Driver.BufferData().
+ * \return true for success, false if out of memory
+ */
+static GLboolean
+intel_bufferobj_data(struct gl_context * ctx,
+ GLenum target,
+ GLsizeiptrARB size,
+ const GLvoid * data,
+ GLenum usage, struct gl_buffer_object *obj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ /* Part of the ABI, but this function doesn't use it.
+ */
+#ifndef I915
+ (void) target;
+#endif
+
+ intel_obj->Base.Size = size;
+ intel_obj->Base.Usage = usage;
+
+ assert(!obj->Pointer); /* Mesa should have unmapped it */
+
+ if (intel_obj->buffer != NULL)
+ release_buffer(intel_obj);
+
+ free(intel_obj->sys_buffer);
+ intel_obj->sys_buffer = NULL;
+
+ if (size != 0) {
+#ifdef I915
+ /* On pre-965, stick VBOs in system memory, as we're always doing
+ * swtnl with their contents anyway.
+ */
+ if (target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER) {
+ intel_obj->sys_buffer = malloc(size);
+ if (intel_obj->sys_buffer != NULL) {
+ if (data != NULL)
+ memcpy(intel_obj->sys_buffer, data, size);
+ return true;
+ }
+ }
+#endif
+ intel_bufferobj_alloc_buffer(intel, intel_obj);
+ if (!intel_obj->buffer)
+ return false;
+
+ if (data != NULL)
+ drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
+ }
+
+ return true;
+}
+
+
+/**
+ * Replace data in a subrange of a buffer object. If the data range
+ * specified by size + offset extends beyond the end of the buffer or
+ * if data is NULL, no copy is performed.
+ * Called via glBufferSubDataARB().
+ */
+static void
+intel_bufferobj_subdata(struct gl_context * ctx,
+ GLintptrARB offset,
+ GLsizeiptrARB size,
+ const GLvoid * data, struct gl_buffer_object *obj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+ bool busy;
+
+ if (size == 0)
+ return;
+
+ assert(intel_obj);
+
+ /* If we have a single copy in system memory, update that */
+ if (intel_obj->sys_buffer) {
+ if (intel_obj->source)
+ release_buffer(intel_obj);
+
+ if (intel_obj->buffer == NULL) {
+ memcpy((char *)intel_obj->sys_buffer + offset, data, size);
+ return;
+ }
+
+ free(intel_obj->sys_buffer);
+ intel_obj->sys_buffer = NULL;
+ }
+
+ /* Otherwise we need to update the copy in video memory. */
+ busy =
+ drm_intel_bo_busy(intel_obj->buffer) ||
+ drm_intel_bo_references(intel->batch.bo, intel_obj->buffer);
+
+ if (busy) {
+ if (size == intel_obj->Base.Size) {
+ /* Replace the current busy bo with fresh data. */
+ drm_intel_bo_unreference(intel_obj->buffer);
+ intel_bufferobj_alloc_buffer(intel, intel_obj);
+ drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
+ } else {
+ perf_debug("Using a blit copy to avoid stalling on glBufferSubData() "
+ "to a busy buffer object.\n");
+ drm_intel_bo *temp_bo =
+ drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);
+
+ drm_intel_bo_subdata(temp_bo, 0, size, data);
+
+ intel_emit_linear_blit(intel,
+ intel_obj->buffer, offset,
+ temp_bo, 0,
+ size);
+
+ drm_intel_bo_unreference(temp_bo);
+ }
+ } else {
+ if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
+ if (drm_intel_bo_busy(intel_obj->buffer)) {
+ perf_debug("Stalling on the GPU in glBufferSubData().\n");
+ }
+ }
+ drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
+ }
+}
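+
+/* Two strategies for a busy destination above: a write covering the whole
+ * buffer just replaces the BO with a freshly allocated one (orphaning),
+ * while a partial write is staged through a temporary BO and copied into
+ * place with the blitter, so the CPU never stalls waiting on the GPU.
+ */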
+
+
+/**
+ * Called via glGetBufferSubDataARB().
+ */
+static void
+intel_bufferobj_get_subdata(struct gl_context * ctx,
+ GLintptrARB offset,
+ GLsizeiptrARB size,
+ GLvoid * data, struct gl_buffer_object *obj)
+{
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+ struct intel_context *intel = intel_context(ctx);
+
+ assert(intel_obj);
+ if (intel_obj->sys_buffer)
+ memcpy(data, (char *)intel_obj->sys_buffer + offset, size);
+ else {
+ if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
+ intel_batchbuffer_flush(intel);
+ }
+ drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
+ }
+}
+
+
+
+/**
+ * Called via glMapBufferRange and glMapBuffer
+ *
+ * The goal of this extension is to allow apps to accumulate their rendering
+ * at the same time as they accumulate their buffer object. Without it,
+ * you'd end up blocking on execution of rendering every time you mapped
+ * the buffer to put new data in.
+ *
+ * We support it in 3 ways: If unsynchronized, then don't bother
+ * flushing the batchbuffer before mapping the buffer, which can save blocking
+ * in many cases. If we would still block, and they allow the whole buffer
+ * to be invalidated, then just allocate a new buffer to replace the old one.
+ * If not, and we'd block, and they allow the subrange of the buffer to be
+ * invalidated, then we can make a new little BO, let them write into that,
+ * and blit it into the real BO at unmap time.
+ */
+static void *
+intel_bufferobj_map_range(struct gl_context * ctx,
+ GLintptr offset, GLsizeiptr length,
+ GLbitfield access, struct gl_buffer_object *obj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ assert(intel_obj);
+
+ /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
+ * internally uses our functions directly.
+ */
+ obj->Offset = offset;
+ obj->Length = length;
+ obj->AccessFlags = access;
+
+ if (intel_obj->sys_buffer) {
+ const bool read_only =
+ (access & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT)) == GL_MAP_READ_BIT;
+
+ if (!read_only && intel_obj->source)
+ release_buffer(intel_obj);
+
+ if (!intel_obj->buffer || intel_obj->source) {
+ obj->Pointer = intel_obj->sys_buffer + offset;
+ return obj->Pointer;
+ }
+
+ free(intel_obj->sys_buffer);
+ intel_obj->sys_buffer = NULL;
+ }
+
+ if (intel_obj->buffer == NULL) {
+ obj->Pointer = NULL;
+ return NULL;
+ }
+
+ /* If the access is synchronized (like a normal buffer mapping), then get
+ * things flushed out so the later mapping syncs appropriately through GEM.
+ * If the user doesn't care about existing buffer contents and mapping would
+ * cause us to block, then throw out the old buffer.
+ *
+ * If they set INVALIDATE_BUFFER, we can pitch the current contents to
+ * achieve the required synchronization.
+ */
+ if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
+ if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
+ if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
+ drm_intel_bo_unreference(intel_obj->buffer);
+ intel_bufferobj_alloc_buffer(intel, intel_obj);
+ } else {
+ intel_flush(ctx);
+ }
+ } else if (drm_intel_bo_busy(intel_obj->buffer) &&
+ (access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
+ drm_intel_bo_unreference(intel_obj->buffer);
+ intel_bufferobj_alloc_buffer(intel, intel_obj);
+ }
+ }
+
+ /* If the user is mapping a range of an active buffer object but
+ * doesn't require the current contents of that range, make a new
+ * BO, and we'll copy what they put in there out at unmap or
+ * FlushRange time.
+ */
+ if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
+ drm_intel_bo_busy(intel_obj->buffer)) {
+ if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
+ intel_obj->range_map_buffer = malloc(length);
+ obj->Pointer = intel_obj->range_map_buffer;
+ } else {
+ intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
+ "range map",
+ length, 64);
+ if (!(access & GL_MAP_READ_BIT)) {
+ drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
+ } else {
+ drm_intel_bo_map(intel_obj->range_map_bo,
+ (access & GL_MAP_WRITE_BIT) != 0);
+ }
+ obj->Pointer = intel_obj->range_map_bo->virtual;
+ }
+ return obj->Pointer;
+ }
+
+ if (access & GL_MAP_UNSYNCHRONIZED_BIT)
+ drm_intel_gem_bo_map_unsynchronized(intel_obj->buffer);
+ else if (!(access & GL_MAP_READ_BIT)) {
+ drm_intel_gem_bo_map_gtt(intel_obj->buffer);
+ } else {
+ drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
+ }
+
+ obj->Pointer = intel_obj->buffer->virtual + offset;
+ return obj->Pointer;
+}
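+
+/* The mapping strategy above, in order: unsynchronized maps skip flushing
+ * entirely; INVALIDATE_BUFFER on a busy BO orphans it; INVALIDATE_RANGE on
+ * a busy BO redirects the map to a temporary buffer (malloc'ed memory when
+ * FLUSH_EXPLICIT is set, otherwise a small BO) that is blitted into place
+ * at unmap or flush time; everything else maps the BO directly, through
+ * the GTT when reads are not needed.
+ */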
+
+/* Ideally we'd use a BO to avoid taking up cache space for the temporary
+ * data, but FlushMappedBufferRange may be followed by further writes to
+ * the pointer, so we would have to re-map after emitting our blit, which
+ * would defeat the point.
+ */
+static void
+intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
+ GLintptr offset, GLsizeiptr length,
+ struct gl_buffer_object *obj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+ drm_intel_bo *temp_bo;
+
+ /* Unless we're in the range map using a temporary system buffer,
+ * there's no work to do.
+ */
+ if (intel_obj->range_map_buffer == NULL)
+ return;
+
+ if (length == 0)
+ return;
+
+ temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);
+
+ drm_intel_bo_subdata(temp_bo, 0, length, intel_obj->range_map_buffer);
+
+ intel_emit_linear_blit(intel,
+ intel_obj->buffer, obj->Offset + offset,
+ temp_bo, 0,
+ length);
+
+ drm_intel_bo_unreference(temp_bo);
+}
+
+
+/**
+ * Called via glUnmapBuffer().
+ */
+static GLboolean
+intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ assert(intel_obj);
+ assert(obj->Pointer);
+ if (intel_obj->sys_buffer != NULL) {
+ /* always keep the mapping around. */
+ } else if (intel_obj->range_map_buffer != NULL) {
+ /* Since we've emitted some blits to buffers that will (likely) be used
+ * in rendering operations in other cache domains in this batch, emit a
+ * flush. Once again, we wish for a domain tracker in libdrm to cover
+ * usage inside of a batchbuffer.
+ */
+ intel_batchbuffer_emit_mi_flush(intel);
+ free(intel_obj->range_map_buffer);
+ intel_obj->range_map_buffer = NULL;
+ } else if (intel_obj->range_map_bo != NULL) {
+ drm_intel_bo_unmap(intel_obj->range_map_bo);
+
+ intel_emit_linear_blit(intel,
+ intel_obj->buffer, obj->Offset,
+ intel_obj->range_map_bo, 0,
+ obj->Length);
+
+ /* Since we've emitted some blits to buffers that will (likely) be used
+ * in rendering operations in other cache domains in this batch, emit a
+ * flush. Once again, we wish for a domain tracker in libdrm to cover
+ * usage inside of a batchbuffer.
+ */
+ intel_batchbuffer_emit_mi_flush(intel);
+
+ drm_intel_bo_unreference(intel_obj->range_map_bo);
+ intel_obj->range_map_bo = NULL;
+ } else if (intel_obj->buffer != NULL) {
+ drm_intel_bo_unmap(intel_obj->buffer);
+ }
+ obj->Pointer = NULL;
+ obj->Offset = 0;
+ obj->Length = 0;
+
+ return true;
+}
+
+drm_intel_bo *
+intel_bufferobj_buffer(struct intel_context *intel,
+ struct intel_buffer_object *intel_obj,
+ GLuint flag)
+{
+ if (intel_obj->source)
+ release_buffer(intel_obj);
+
+ if (intel_obj->buffer == NULL) {
+ intel_bufferobj_alloc_buffer(intel, intel_obj);
+ drm_intel_bo_subdata(intel_obj->buffer,
+ 0, intel_obj->Base.Size,
+ intel_obj->sys_buffer);
+
+ free(intel_obj->sys_buffer);
+ intel_obj->sys_buffer = NULL;
+ intel_obj->offset = 0;
+ }
+
+ return intel_obj->buffer;
+}
+
+#define INTEL_UPLOAD_SIZE (64*1024)
+
+void
+intel_upload_finish(struct intel_context *intel)
+{
+ if (!intel->upload.bo)
+ return;
+
+ if (intel->upload.buffer_len) {
+ drm_intel_bo_subdata(intel->upload.bo,
+ intel->upload.buffer_offset,
+ intel->upload.buffer_len,
+ intel->upload.buffer);
+ intel->upload.buffer_len = 0;
+ }
+
+ drm_intel_bo_unreference(intel->upload.bo);
+ intel->upload.bo = NULL;
+}
+
+static void wrap_buffers(struct intel_context *intel, GLuint size)
+{
+ intel_upload_finish(intel);
+
+ if (size < INTEL_UPLOAD_SIZE)
+ size = INTEL_UPLOAD_SIZE;
+
+ intel->upload.bo = drm_intel_bo_alloc(intel->bufmgr, "upload", size, 0);
+ intel->upload.offset = 0;
+}
+
+void intel_upload_data(struct intel_context *intel,
+ const void *ptr, GLuint size, GLuint align,
+ drm_intel_bo **return_bo,
+ GLuint *return_offset)
+{
+ GLuint base, delta;
+
+ base = (intel->upload.offset + align - 1) / align * align;
+ if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
+ wrap_buffers(intel, size);
+ base = 0;
+ }
+
+ drm_intel_bo_reference(intel->upload.bo);
+ *return_bo = intel->upload.bo;
+ *return_offset = base;
+
+ delta = base - intel->upload.offset;
+ if (intel->upload.buffer_len &&
+ intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer))
+ {
+ drm_intel_bo_subdata(intel->upload.bo,
+ intel->upload.buffer_offset,
+ intel->upload.buffer_len,
+ intel->upload.buffer);
+ intel->upload.buffer_len = 0;
+ }
+
+ if (size < sizeof(intel->upload.buffer))
+ {
+ if (intel->upload.buffer_len == 0)
+ intel->upload.buffer_offset = base;
+ else
+ intel->upload.buffer_len += delta;
+
+ memcpy(intel->upload.buffer + intel->upload.buffer_len, ptr, size);
+ intel->upload.buffer_len += size;
+ }
+ else
+ {
+ drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
+ }
+
+ intel->upload.offset = base + size;
+}
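+
+/* The alignment arithmetic rounds the current offset up to the next
+ * multiple of align, e.g. offset 13 with align 8 yields base 16. Small
+ * uploads are coalesced in the upload.buffer staging area and written
+ * out in a single subdata call; larger ones go straight to the BO.
+ */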
+
+void *intel_upload_map(struct intel_context *intel, GLuint size, GLuint align)
+{
+ GLuint base, delta;
+ char *ptr;
+
+ base = (intel->upload.offset + align - 1) / align * align;
+ if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
+ wrap_buffers(intel, size);
+ base = 0;
+ }
+
+ delta = base - intel->upload.offset;
+ if (intel->upload.buffer_len &&
+ intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer))
+ {
+ drm_intel_bo_subdata(intel->upload.bo,
+ intel->upload.buffer_offset,
+ intel->upload.buffer_len,
+ intel->upload.buffer);
+ intel->upload.buffer_len = 0;
+ }
+
+ if (size <= sizeof(intel->upload.buffer)) {
+ if (intel->upload.buffer_len == 0)
+ intel->upload.buffer_offset = base;
+ else
+ intel->upload.buffer_len += delta;
+
+ ptr = intel->upload.buffer + intel->upload.buffer_len;
+ intel->upload.buffer_len += size;
+ } else
+ ptr = malloc(size);
+
+ return ptr;
+}
+
+void intel_upload_unmap(struct intel_context *intel,
+ const void *ptr, GLuint size, GLuint align,
+ drm_intel_bo **return_bo,
+ GLuint *return_offset)
+{
+ GLuint base;
+
+ base = (intel->upload.offset + align - 1) / align * align;
+ if (size > sizeof(intel->upload.buffer)) {
+ drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
+ free((void*)ptr);
+ }
+
+ drm_intel_bo_reference(intel->upload.bo);
+ *return_bo = intel->upload.bo;
+ *return_offset = base;
+
+ intel->upload.offset = base + size;
+}
+
+drm_intel_bo *
+intel_bufferobj_source(struct intel_context *intel,
+ struct intel_buffer_object *intel_obj,
+ GLuint align, GLuint *offset)
+{
+ if (intel_obj->buffer == NULL) {
+ intel_upload_data(intel,
+ intel_obj->sys_buffer, intel_obj->Base.Size, align,
+ &intel_obj->buffer, &intel_obj->offset);
+ intel_obj->source = 1;
+ }
+
+ *offset = intel_obj->offset;
+ return intel_obj->buffer;
+}
+
+static void
+intel_bufferobj_copy_subdata(struct gl_context *ctx,
+ struct gl_buffer_object *src,
+ struct gl_buffer_object *dst,
+ GLintptr read_offset, GLintptr write_offset,
+ GLsizeiptr size)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_src = intel_buffer_object(src);
+ struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
+ drm_intel_bo *src_bo, *dst_bo;
+ GLuint src_offset;
+
+ if (size == 0)
+ return;
+
+ /* If we're in system memory, just map and memcpy. */
+ if (intel_src->sys_buffer || intel_dst->sys_buffer) {
+ /* The same buffer may be used, but note that regions copied may
+ * not overlap.
+ */
+ if (src == dst) {
+ char *ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
+ GL_MAP_READ_BIT |
+ GL_MAP_WRITE_BIT,
+ dst);
+ memmove(ptr + write_offset, ptr + read_offset, size);
+ intel_bufferobj_unmap(ctx, dst);
+ } else {
+ const char *src_ptr;
+ char *dst_ptr;
+
+ src_ptr = intel_bufferobj_map_range(ctx, 0, src->Size,
+ GL_MAP_READ_BIT, src);
+ dst_ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
+ GL_MAP_WRITE_BIT, dst);
+
+ memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);
+
+ intel_bufferobj_unmap(ctx, src);
+ intel_bufferobj_unmap(ctx, dst);
+ }
+ return;
+ }
+
+ /* Otherwise, we have real BOs, so blit them. */
+
+ dst_bo = intel_bufferobj_buffer(intel, intel_dst, INTEL_WRITE_PART);
+ src_bo = intel_bufferobj_source(intel, intel_src, 64, &src_offset);
+
+ intel_emit_linear_blit(intel,
+ dst_bo, write_offset,
+ src_bo, read_offset + src_offset, size);
+
+ /* Since we've emitted some blits to buffers that will (likely) be used
+ * in rendering operations in other cache domains in this batch, emit a
+ * flush. Once again, we wish for a domain tracker in libdrm to cover
+ * usage inside of a batchbuffer.
+ */
+ intel_batchbuffer_emit_mi_flush(intel);
+}
+
+#if FEATURE_APPLE_object_purgeable
+static GLenum
+intel_buffer_purgeable(drm_intel_bo *buffer)
+{
+ int retained = 0;
+
+ if (buffer != NULL)
+ retained = drm_intel_bo_madvise (buffer, I915_MADV_DONTNEED);
+
+ return retained ? GL_VOLATILE_APPLE : GL_RELEASED_APPLE;
+}
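+
+/* I915_MADV_DONTNEED marks the BO's pages as purgeable; the kernel reports
+ * whether they are still resident, which maps to GL_VOLATILE_APPLE
+ * (retained but purgeable) versus GL_RELEASED_APPLE (already discarded).
+ */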
+
+static GLenum
+intel_buffer_object_purgeable(struct gl_context * ctx,
+ struct gl_buffer_object *obj,
+ GLenum option)
+{
+ struct intel_buffer_object *intel_obj = intel_buffer_object (obj);
+
+ if (intel_obj->buffer != NULL)
+ return intel_buffer_purgeable(intel_obj->buffer);
+
+ if (option == GL_RELEASED_APPLE) {
+ if (intel_obj->sys_buffer != NULL) {
+ free(intel_obj->sys_buffer);
+ intel_obj->sys_buffer = NULL;
+ }
+
+ return GL_RELEASED_APPLE;
+ } else {
+ /* XXX Create the buffer and madvise(MADV_DONTNEED)? */
+ struct intel_context *intel = intel_context(ctx);
+ drm_intel_bo *bo = intel_bufferobj_buffer(intel, intel_obj, INTEL_READ);
+
+ return intel_buffer_purgeable(bo);
+ }
+}
+
+static GLenum
+intel_texture_object_purgeable(struct gl_context * ctx,
+ struct gl_texture_object *obj,
+ GLenum option)
+{
+ struct intel_texture_object *intel;
+
+ (void) ctx;
+ (void) option;
+
+ intel = intel_texture_object(obj);
+ if (intel->mt == NULL || intel->mt->region == NULL)
+ return GL_RELEASED_APPLE;
+
+ return intel_buffer_purgeable(intel->mt->region->bo);
+}
+
+static GLenum
+intel_render_object_purgeable(struct gl_context * ctx,
+ struct gl_renderbuffer *obj,
+ GLenum option)
+{
+ struct intel_renderbuffer *intel;
+
+ (void) ctx;
+ (void) option;
+
+ intel = intel_renderbuffer(obj);
+ if (intel->mt == NULL)
+ return GL_RELEASED_APPLE;
+
+ return intel_buffer_purgeable(intel->mt->region->bo);
+}
+
+static GLenum
+intel_buffer_unpurgeable(drm_intel_bo *buffer)
+{
+ int retained;
+
+ retained = 0;
+ if (buffer != NULL)
+ retained = drm_intel_bo_madvise (buffer, I915_MADV_WILLNEED);
+
+ return retained ? GL_RETAINED_APPLE : GL_UNDEFINED_APPLE;
+}
+
+static GLenum
+intel_buffer_object_unpurgeable(struct gl_context * ctx,
+ struct gl_buffer_object *obj,
+ GLenum option)
+{
+ (void) ctx;
+ (void) option;
+
+ return intel_buffer_unpurgeable(intel_buffer_object (obj)->buffer);
+}
+
+static GLenum
+intel_texture_object_unpurgeable(struct gl_context * ctx,
+ struct gl_texture_object *obj,
+ GLenum option)
+{
+ struct intel_texture_object *intel;
+
+ (void) ctx;
+ (void) option;
+
+ intel = intel_texture_object(obj);
+ if (intel->mt == NULL || intel->mt->region == NULL)
+ return GL_UNDEFINED_APPLE;
+
+ return intel_buffer_unpurgeable(intel->mt->region->bo);
+}
+
+static GLenum
+intel_render_object_unpurgeable(struct gl_context * ctx,
+ struct gl_renderbuffer *obj,
+ GLenum option)
+{
+ struct intel_renderbuffer *intel;
+
+ (void) ctx;
+ (void) option;
+
+ intel = intel_renderbuffer(obj);
+ if (intel->mt == NULL)
+ return GL_UNDEFINED_APPLE;
+
+ return intel_buffer_unpurgeable(intel->mt->region->bo);
+}
+#endif
+
+void
+intelInitBufferObjectFuncs(struct dd_function_table *functions)
+{
+ functions->NewBufferObject = intel_bufferobj_alloc;
+ functions->DeleteBuffer = intel_bufferobj_free;
+ functions->BufferData = intel_bufferobj_data;
+ functions->BufferSubData = intel_bufferobj_subdata;
+ functions->GetBufferSubData = intel_bufferobj_get_subdata;
+ functions->MapBufferRange = intel_bufferobj_map_range;
+ functions->FlushMappedBufferRange = intel_bufferobj_flush_mapped_range;
+ functions->UnmapBuffer = intel_bufferobj_unmap;
+ functions->CopyBufferSubData = intel_bufferobj_copy_subdata;
+
+#if FEATURE_APPLE_object_purgeable
+ functions->BufferObjectPurgeable = intel_buffer_object_purgeable;
+ functions->TextureObjectPurgeable = intel_texture_object_purgeable;
+ functions->RenderObjectPurgeable = intel_render_object_purgeable;
+
+ functions->BufferObjectUnpurgeable = intel_buffer_object_unpurgeable;
+ functions->TextureObjectUnpurgeable = intel_texture_object_unpurgeable;
+ functions->RenderObjectUnpurgeable = intel_render_object_unpurgeable;
+#endif
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_buffers.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_buffers.c
index c86daa49f47..9a9a259c9e5 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_buffers.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_buffers.c
@@ -1 +1,132 @@
-../intel/intel_buffers.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "intel_context.h"
+#include "intel_buffers.h"
+#include "intel_fbo.h"
+#include "intel_mipmap_tree.h"
+
+#include "main/fbobject.h"
+#include "main/framebuffer.h"
+#include "main/renderbuffer.h"
+
+/**
+ * Return pointer to current color drawing region, or NULL.
+ */
+struct intel_region *
+intel_drawbuf_region(struct intel_context *intel)
+{
+ struct intel_renderbuffer *irbColor =
+ intel_renderbuffer(intel->ctx.DrawBuffer->_ColorDrawBuffers[0]);
+ if (irbColor && irbColor->mt)
+ return irbColor->mt->region;
+ else
+ return NULL;
+}
+
+/**
+ * Return pointer to current color reading region, or NULL.
+ */
+struct intel_region *
+intel_readbuf_region(struct intel_context *intel)
+{
+ struct intel_renderbuffer *irb
+ = intel_renderbuffer(intel->ctx.ReadBuffer->_ColorReadBuffer);
+ if (irb && irb->mt)
+ return irb->mt->region;
+ else
+ return NULL;
+}
+
+/**
+ * Check if we're about to draw into the front color buffer.
+ * If so, set the intel->front_buffer_dirty field to true.
+ */
+void
+intel_check_front_buffer_rendering(struct intel_context *intel)
+{
+ const struct gl_framebuffer *fb = intel->ctx.DrawBuffer;
+ if (_mesa_is_winsys_fbo(fb)) {
+ /* drawing to window system buffer */
+ if (fb->_NumColorDrawBuffers > 0) {
+ if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
+ intel->front_buffer_dirty = true;
+ }
+ }
+ }
+}
+
+static void
+intelDrawBuffer(struct gl_context * ctx, GLenum mode)
+{
+ if (ctx->DrawBuffer && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
+ struct intel_context *const intel = intel_context(ctx);
+ const bool was_front_buffer_rendering =
+ intel->is_front_buffer_rendering;
+
+ intel->is_front_buffer_rendering = (mode == GL_FRONT_LEFT)
+ || (mode == GL_FRONT) || (mode == GL_FRONT_AND_BACK);
+
+ /* If we weren't front-buffer rendering before but we are now,
+ * invalidate our DRI drawable so we'll ask for new buffers
+ * (including the fake front) before we start rendering again.
+ */
+ if (!was_front_buffer_rendering && intel->is_front_buffer_rendering)
+ dri2InvalidateDrawable(intel->driContext->driDrawablePriv);
+ }
+
+ intel_draw_buffer(ctx);
+}
+
+
+static void
+intelReadBuffer(struct gl_context * ctx, GLenum mode)
+{
+ if (ctx->DrawBuffer && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
+ struct intel_context *const intel = intel_context(ctx);
+ const bool was_front_buffer_reading =
+ intel->is_front_buffer_reading;
+
+ intel->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
+ || (mode == GL_FRONT);
+
+ /* If we weren't front-buffer reading before but we are now,
+ * invalidate our DRI drawable so we'll ask for new buffers
+ * (including the fake front) before we start reading again.
+ */
+ if (!was_front_buffer_reading && intel->is_front_buffer_reading)
+ dri2InvalidateDrawable(intel->driContext->driReadablePriv);
+ }
+}
+
+
+void
+intelInitBufferFuncs(struct dd_function_table *functions)
+{
+ functions->DrawBuffer = intelDrawBuffer;
+ functions->ReadBuffer = intelReadBuffer;
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_context.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_context.c
index 27a1cbb255e..25334da33f7 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_context.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_context.c
@@ -1 +1,1030 @@
-../intel/intel_context.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "main/glheader.h"
+#include "main/context.h"
+#include "main/extensions.h"
+#include "main/fbobject.h"
+#include "main/framebuffer.h"
+#include "main/imports.h"
+#include "main/points.h"
+#include "main/renderbuffer.h"
+
+#include "swrast/swrast.h"
+#include "swrast_setup/swrast_setup.h"
+#include "tnl/tnl.h"
+#include "drivers/common/driverfuncs.h"
+#include "drivers/common/meta.h"
+
+#include "intel_chipset.h"
+#include "intel_buffers.h"
+#include "intel_tex.h"
+#include "intel_batchbuffer.h"
+#include "intel_clear.h"
+#include "intel_extensions.h"
+#include "intel_pixel.h"
+#include "intel_regions.h"
+#include "intel_buffer_objects.h"
+#include "intel_fbo.h"
+#include "intel_bufmgr.h"
+#include "intel_screen.h"
+#include "intel_mipmap_tree.h"
+
+#include "utils.h"
+#include "../glsl/ralloc.h"
+
+#ifndef INTEL_DEBUG
+int INTEL_DEBUG = (0);
+#endif
+
+
+static const GLubyte *
+intelGetString(struct gl_context * ctx, GLenum name)
+{
+ const struct intel_context *const intel = intel_context(ctx);
+ const char *chipset;
+ static char buffer[128];
+
+ switch (name) {
+ case GL_VENDOR:
+ return (GLubyte *) "Intel Open Source Technology Center";
+ break;
+
+ case GL_RENDERER:
+ switch (intel->intelScreen->deviceID) {
+ case PCI_CHIP_845_G:
+ chipset = "Intel(R) 845G";
+ break;
+ case PCI_CHIP_I830_M:
+ chipset = "Intel(R) 830M";
+ break;
+ case PCI_CHIP_I855_GM:
+ chipset = "Intel(R) 852GM/855GM";
+ break;
+ case PCI_CHIP_I865_G:
+ chipset = "Intel(R) 865G";
+ break;
+ case PCI_CHIP_I915_G:
+ chipset = "Intel(R) 915G";
+ break;
+ case PCI_CHIP_E7221_G:
+ chipset = "Intel (R) E7221G (i915)";
+ break;
+ case PCI_CHIP_I915_GM:
+ chipset = "Intel(R) 915GM";
+ break;
+ case PCI_CHIP_I945_G:
+ chipset = "Intel(R) 945G";
+ break;
+ case PCI_CHIP_I945_GM:
+ chipset = "Intel(R) 945GM";
+ break;
+ case PCI_CHIP_I945_GME:
+ chipset = "Intel(R) 945GME";
+ break;
+ case PCI_CHIP_G33_G:
+ chipset = "Intel(R) G33";
+ break;
+ case PCI_CHIP_Q35_G:
+ chipset = "Intel(R) Q35";
+ break;
+ case PCI_CHIP_Q33_G:
+ chipset = "Intel(R) Q33";
+ break;
+ case PCI_CHIP_IGD_GM:
+ case PCI_CHIP_IGD_G:
+ chipset = "Intel(R) IGD";
+ break;
+ case PCI_CHIP_I965_Q:
+ chipset = "Intel(R) 965Q";
+ break;
+ case PCI_CHIP_I965_G:
+ case PCI_CHIP_I965_G_1:
+ chipset = "Intel(R) 965G";
+ break;
+ case PCI_CHIP_I946_GZ:
+ chipset = "Intel(R) 946GZ";
+ break;
+ case PCI_CHIP_I965_GM:
+ chipset = "Intel(R) 965GM";
+ break;
+ case PCI_CHIP_I965_GME:
+ chipset = "Intel(R) 965GME/GLE";
+ break;
+ case PCI_CHIP_GM45_GM:
+ chipset = "Mobile Intel® GM45 Express Chipset";
+ break;
+ case PCI_CHIP_IGD_E_G:
+ chipset = "Intel(R) Integrated Graphics Device";
+ break;
+ case PCI_CHIP_G45_G:
+ chipset = "Intel(R) G45/G43";
+ break;
+ case PCI_CHIP_Q45_G:
+ chipset = "Intel(R) Q45/Q43";
+ break;
+ case PCI_CHIP_G41_G:
+ chipset = "Intel(R) G41";
+ break;
+ case PCI_CHIP_B43_G:
+ case PCI_CHIP_B43_G1:
+ chipset = "Intel(R) B43";
+ break;
+ case PCI_CHIP_ILD_G:
+ chipset = "Intel(R) Ironlake Desktop";
+ break;
+ case PCI_CHIP_ILM_G:
+ chipset = "Intel(R) Ironlake Mobile";
+ break;
+ case PCI_CHIP_SANDYBRIDGE_GT1:
+ case PCI_CHIP_SANDYBRIDGE_GT2:
+ case PCI_CHIP_SANDYBRIDGE_GT2_PLUS:
+ chipset = "Intel(R) Sandybridge Desktop";
+ break;
+ case PCI_CHIP_SANDYBRIDGE_M_GT1:
+ case PCI_CHIP_SANDYBRIDGE_M_GT2:
+ case PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS:
+ chipset = "Intel(R) Sandybridge Mobile";
+ break;
+ case PCI_CHIP_SANDYBRIDGE_S:
+ chipset = "Intel(R) Sandybridge Server";
+ break;
+ case PCI_CHIP_IVYBRIDGE_GT1:
+ case PCI_CHIP_IVYBRIDGE_GT2:
+ chipset = "Intel(R) Ivybridge Desktop";
+ break;
+ case PCI_CHIP_IVYBRIDGE_M_GT1:
+ case PCI_CHIP_IVYBRIDGE_M_GT2:
+ chipset = "Intel(R) Ivybridge Mobile";
+ break;
+ case PCI_CHIP_IVYBRIDGE_S_GT1:
+ case PCI_CHIP_IVYBRIDGE_S_GT2:
+ chipset = "Intel(R) Ivybridge Server";
+ break;
+ case PCI_CHIP_HASWELL_GT1:
+ case PCI_CHIP_HASWELL_GT2:
+ case PCI_CHIP_HASWELL_GT2_PLUS:
+ case PCI_CHIP_HASWELL_SDV_GT1:
+ case PCI_CHIP_HASWELL_SDV_GT2:
+ case PCI_CHIP_HASWELL_SDV_GT2_PLUS:
+ case PCI_CHIP_HASWELL_ULT_GT1:
+ case PCI_CHIP_HASWELL_ULT_GT2:
+ case PCI_CHIP_HASWELL_ULT_GT2_PLUS:
+ case PCI_CHIP_HASWELL_CRW_GT1:
+ case PCI_CHIP_HASWELL_CRW_GT2:
+ case PCI_CHIP_HASWELL_CRW_GT2_PLUS:
+ chipset = "Intel(R) Haswell Desktop";
+ break;
+ case PCI_CHIP_HASWELL_M_GT1:
+ case PCI_CHIP_HASWELL_M_GT2:
+ case PCI_CHIP_HASWELL_M_GT2_PLUS:
+ case PCI_CHIP_HASWELL_SDV_M_GT1:
+ case PCI_CHIP_HASWELL_SDV_M_GT2:
+ case PCI_CHIP_HASWELL_SDV_M_GT2_PLUS:
+ case PCI_CHIP_HASWELL_ULT_M_GT1:
+ case PCI_CHIP_HASWELL_ULT_M_GT2:
+ case PCI_CHIP_HASWELL_ULT_M_GT2_PLUS:
+ case PCI_CHIP_HASWELL_CRW_M_GT1:
+ case PCI_CHIP_HASWELL_CRW_M_GT2:
+ case PCI_CHIP_HASWELL_CRW_M_GT2_PLUS:
+ chipset = "Intel(R) Haswell Mobile";
+ break;
+ case PCI_CHIP_HASWELL_S_GT1:
+ case PCI_CHIP_HASWELL_S_GT2:
+ case PCI_CHIP_HASWELL_S_GT2_PLUS:
+ case PCI_CHIP_HASWELL_SDV_S_GT1:
+ case PCI_CHIP_HASWELL_SDV_S_GT2:
+ case PCI_CHIP_HASWELL_SDV_S_GT2_PLUS:
+ case PCI_CHIP_HASWELL_ULT_S_GT1:
+ case PCI_CHIP_HASWELL_ULT_S_GT2:
+ case PCI_CHIP_HASWELL_ULT_S_GT2_PLUS:
+ case PCI_CHIP_HASWELL_CRW_S_GT1:
+ case PCI_CHIP_HASWELL_CRW_S_GT2:
+ case PCI_CHIP_HASWELL_CRW_S_GT2_PLUS:
+ chipset = "Intel(R) Haswell Server";
+ break;
+ default:
+ chipset = "Unknown Intel Chipset";
+ break;
+ }
+
+ (void) driGetRendererString(buffer, chipset, 0);
+ return (GLubyte *) buffer;
+
+ default:
+ return NULL;
+ }
+}
+
+void
+intel_downsample_for_dri2_flush(struct intel_context *intel,
+ __DRIdrawable *drawable)
+{
+ if (intel->gen < 6) {
+ /* MSAA is not supported, so don't waste time checking for
+ * a multisample buffer.
+ */
+ return;
+ }
+
+ struct gl_framebuffer *fb = drawable->driverPrivate;
+ struct intel_renderbuffer *rb;
+
+ /* Usually, only the back buffer will need to be downsampled. However,
+ * the front buffer will also need it if the user has rendered into it.
+ */
+ static const gl_buffer_index buffers[2] = {
+ BUFFER_BACK_LEFT,
+ BUFFER_FRONT_LEFT,
+ };
+
+ for (int i = 0; i < 2; ++i) {
+ rb = intel_get_renderbuffer(fb, buffers[i]);
+ if (rb == NULL || rb->mt == NULL)
+ continue;
+ intel_miptree_downsample(intel, rb->mt);
+ }
+}
+
+static void
+intel_flush_front(struct gl_context *ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+ __DRIcontext *driContext = intel->driContext;
+ __DRIdrawable *driDrawable = driContext->driDrawablePriv;
+ __DRIscreen *const screen = intel->intelScreen->driScrnPriv;
+
+ if (_mesa_is_winsys_fbo(ctx->DrawBuffer) && intel->front_buffer_dirty) {
+ if (screen->dri2.loader->flushFrontBuffer != NULL &&
+ driDrawable &&
+ driDrawable->loaderPrivate) {
+
+ /* Downsample before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
+ *
+ * This potentially downsamples both front and back buffer. It
+ * is unnecessary to downsample the back, but harms nothing except
+ * performance. And no one cares about front-buffer render
+ * performance.
+ */
+ intel_downsample_for_dri2_flush(intel, driDrawable);
+
+ screen->dri2.loader->flushFrontBuffer(driDrawable,
+ driDrawable->loaderPrivate);
+
+ /* The dirty bit will be set again in intel_prepare_render() if we
+ * are still front-buffer rendering once we get there.
+ */
+ intel->front_buffer_dirty = false;
+ }
+ }
+}
+
+static unsigned
+intel_bits_per_pixel(const struct intel_renderbuffer *rb)
+{
+ return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
+}
+
+static void
+intel_query_dri2_buffers(struct intel_context *intel,
+ __DRIdrawable *drawable,
+ __DRIbuffer **buffers,
+ int *count);
+
+static void
+intel_process_dri2_buffer(struct intel_context *intel,
+ __DRIdrawable *drawable,
+ __DRIbuffer *buffer,
+ struct intel_renderbuffer *rb,
+ const char *buffer_name);
+
+void
+intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
+{
+ struct gl_framebuffer *fb = drawable->driverPrivate;
+ struct intel_renderbuffer *rb;
+ struct intel_context *intel = context->driverPrivate;
+ __DRIbuffer *buffers = NULL;
+ int i, count;
+ const char *region_name;
+
+ /* If we're rendering to the fake front buffer, make sure all the
+ * pending drawing has landed on the real front buffer. Otherwise
+ * when we eventually get to DRI2GetBuffersWithFormat the stale
+ * real front buffer contents will get copied to the new fake front
+ * buffer.
+ */
+ if (intel->is_front_buffer_rendering) {
+ intel_flush(&intel->ctx);
+ intel_flush_front(&intel->ctx);
+ }
+
+ /* Set this up front, so that in case our buffers get invalidated
+ * while we're getting new buffers, we don't clobber the stamp and
+ * thus ignore the invalidate. */
+ drawable->lastStamp = drawable->dri2.stamp;
+
+ if (unlikely(INTEL_DEBUG & DEBUG_DRI))
+ fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
+
+ intel_query_dri2_buffers(intel, drawable, &buffers, &count);
+
+ if (buffers == NULL)
+ return;
+
+ for (i = 0; i < count; i++) {
+ switch (buffers[i].attachment) {
+ case __DRI_BUFFER_FRONT_LEFT:
+ rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
+ region_name = "dri2 front buffer";
+ break;
+
+ case __DRI_BUFFER_FAKE_FRONT_LEFT:
+ rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
+ region_name = "dri2 fake front buffer";
+ break;
+
+ case __DRI_BUFFER_BACK_LEFT:
+ rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
+ region_name = "dri2 back buffer";
+ break;
+
+ case __DRI_BUFFER_DEPTH:
+ case __DRI_BUFFER_HIZ:
+ case __DRI_BUFFER_DEPTH_STENCIL:
+ case __DRI_BUFFER_STENCIL:
+ case __DRI_BUFFER_ACCUM:
+ default:
+ fprintf(stderr,
+ "unhandled buffer attach event, attachment type %d\n",
+ buffers[i].attachment);
+ return;
+ }
+
+ intel_process_dri2_buffer(intel, drawable, &buffers[i], rb, region_name);
+ }
+
+ driUpdateFramebufferSize(&intel->ctx, drawable);
+}
+
+/**
+ * intel_prepare_render should be called anywhere that current read/draw
+ * buffer state is required.
+ */
+void
+intel_prepare_render(struct intel_context *intel)
+{
+ __DRIcontext *driContext = intel->driContext;
+ __DRIdrawable *drawable;
+
+ drawable = driContext->driDrawablePriv;
+ if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
+ if (drawable->lastStamp != drawable->dri2.stamp)
+ intel_update_renderbuffers(driContext, drawable);
+ intel_draw_buffer(&intel->ctx);
+ driContext->dri2.draw_stamp = drawable->dri2.stamp;
+ }
+
+ drawable = driContext->driReadablePriv;
+ if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
+ if (drawable->lastStamp != drawable->dri2.stamp)
+ intel_update_renderbuffers(driContext, drawable);
+ driContext->dri2.read_stamp = drawable->dri2.stamp;
+ }
+
+ /* If we're currently rendering to the front buffer, the rendering
+ * that will happen next will probably dirty the front buffer. So
+ * mark it as dirty here.
+ */
+ if (intel->is_front_buffer_rendering)
+ intel->front_buffer_dirty = true;
+
+ /* Wait for the swapbuffers before the one we just emitted, so we
+ * don't get too many swaps outstanding for apps that are GPU-heavy
+ * but not CPU-heavy.
+ *
+ * We're using intelDRI2Flush (called from the loader before
+ * swapbuffer) and glFlush (for front buffer rendering) as the
+ * indicator that a frame is done and then throttle when we get
+ * here as we prepare to render the next frame. At this point, the
+ * round trips for swap/copy and getting new buffers are done and
+ * we'll spend less time waiting on the GPU.
+ *
+ * Unfortunately, we don't have a handle to the batch containing
+ * the swap, and getting our hands on that doesn't seem worth it,
+ * so we just use the first batch we emitted after the last swap.
+ */
+ if (intel->need_throttle && intel->first_post_swapbuffers_batch) {
+ drm_intel_bo_wait_rendering(intel->first_post_swapbuffers_batch);
+ drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
+ intel->first_post_swapbuffers_batch = NULL;
+ intel->need_throttle = false;
+ }
+}
+
+static void
+intel_viewport(struct gl_context *ctx, GLint x, GLint y, GLsizei w, GLsizei h)
+{
+ struct intel_context *intel = intel_context(ctx);
+ __DRIcontext *driContext = intel->driContext;
+
+ if (intel->saved_viewport)
+ intel->saved_viewport(ctx, x, y, w, h);
+
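+ /* Without DRI2 invalidate events, a glViewport() call (which typically
+ * accompanies a window resize) is the only hint that the window-system
+ * buffers may be stale, so invalidate them here.
+ */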
+ if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
+ dri2InvalidateDrawable(driContext->driDrawablePriv);
+ dri2InvalidateDrawable(driContext->driReadablePriv);
+ }
+}
+
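+/* Each entry maps a token of the INTEL_DEBUG environment variable to a
+ * debug flag; e.g. INTEL_DEBUG=dri,bat,buf enables DRI, batchbuffer and
+ * buffer-manager logging. The table is consumed by driParseDebugString()
+ * in intelInitContext().
+ */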
+static const struct dri_debug_control debug_control[] = {
+ { "tex", DEBUG_TEXTURE},
+ { "state", DEBUG_STATE},
+ { "ioctl", DEBUG_IOCTL},
+ { "blit", DEBUG_BLIT},
+ { "mip", DEBUG_MIPTREE},
+ { "fall", DEBUG_PERF},
+ { "perf", DEBUG_PERF},
+ { "verb", DEBUG_VERBOSE},
+ { "bat", DEBUG_BATCH},
+ { "pix", DEBUG_PIXEL},
+ { "buf", DEBUG_BUFMGR},
+ { "reg", DEBUG_REGION},
+ { "fbo", DEBUG_FBO},
+ { "gs", DEBUG_GS},
+ { "sync", DEBUG_SYNC},
+ { "prim", DEBUG_PRIMS },
+ { "vert", DEBUG_VERTS },
+ { "dri", DEBUG_DRI },
+ { "sf", DEBUG_SF },
+ { "san", DEBUG_SANITY },
+ { "sleep", DEBUG_SLEEP },
+ { "stats", DEBUG_STATS },
+ { "tile", DEBUG_TILE },
+ { "wm", DEBUG_WM },
+ { "urb", DEBUG_URB },
+ { "vs", DEBUG_VS },
+ { "clip", DEBUG_CLIP },
+ { "aub", DEBUG_AUB },
+ { NULL, 0 }
+};
+
+
+static void
+intelInvalidateState(struct gl_context * ctx, GLuint new_state)
+{
+ struct intel_context *intel = intel_context(ctx);
+
+ if (ctx->swrast_context)
+ _swrast_InvalidateState(ctx, new_state);
+ _vbo_InvalidateState(ctx, new_state);
+
+ intel->NewGLState |= new_state;
+
+ if (intel->vtbl.invalidate_state)
+ intel->vtbl.invalidate_state( intel, new_state );
+}
+
+void
+intel_flush_rendering_to_batch(struct gl_context *ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+
+ if (intel->Fallback)
+ _swrast_flush(ctx);
+
+ if (intel->gen < 4)
+ INTEL_FIREVERTICES(intel);
+}
+
+void
+_intel_flush(struct gl_context *ctx, const char *file, int line)
+{
+ struct intel_context *intel = intel_context(ctx);
+
+ intel_flush_rendering_to_batch(ctx);
+
+ if (intel->batch.used)
+ _intel_batchbuffer_flush(intel, file, line);
+}
+
+static void
+intel_glFlush(struct gl_context *ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+
+ intel_flush(ctx);
+ intel_flush_front(ctx);
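+ /* Front-buffer rendering has no SwapBuffers to throttle on, so treat
+ * glFlush as the end-of-frame marker (see intel_prepare_render()).
+ */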
+ if (intel->is_front_buffer_rendering)
+ intel->need_throttle = true;
+}
+
+void
+intelFinish(struct gl_context * ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+
+ intel_flush(ctx);
+ intel_flush_front(ctx);
+
+ if (intel->batch.last_bo)
+ drm_intel_bo_wait_rendering(intel->batch.last_bo);
+}
+
+void
+intelInitDriverFunctions(struct dd_function_table *functions)
+{
+ _mesa_init_driver_functions(functions);
+
+ functions->Flush = intel_glFlush;
+ functions->Finish = intelFinish;
+ functions->GetString = intelGetString;
+ functions->UpdateState = intelInvalidateState;
+
+ intelInitTextureFuncs(functions);
+ intelInitTextureImageFuncs(functions);
+ intelInitTextureSubImageFuncs(functions);
+ intelInitTextureCopyImageFuncs(functions);
+ intelInitClearFuncs(functions);
+ intelInitBufferFuncs(functions);
+ intelInitPixelFuncs(functions);
+ intelInitBufferObjectFuncs(functions);
+ intel_init_syncobj_functions(functions);
+}
+
+bool
+intelInitContext(struct intel_context *intel,
+ int api,
+ const struct gl_config * mesaVis,
+ __DRIcontext * driContextPriv,
+ void *sharedContextPrivate,
+ struct dd_function_table *functions)
+{
+ struct gl_context *ctx = &intel->ctx;
+ struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
+ __DRIscreen *sPriv = driContextPriv->driScreenPriv;
+ struct intel_screen *intelScreen = sPriv->driverPrivate;
+ int bo_reuse_mode;
+ struct gl_config visual;
+
+ /* we can't do anything without a connection to the device */
+ if (intelScreen->bufmgr == NULL)
+ return false;
+
+ /* Can't rely on invalidate events, fall back to glViewport hack */
+ if (!driContextPriv->driScreenPriv->dri2.useInvalidate) {
+ intel->saved_viewport = functions->Viewport;
+ functions->Viewport = intel_viewport;
+ }
+
+ if (mesaVis == NULL) {
+ memset(&visual, 0, sizeof visual);
+ mesaVis = &visual;
+ }
+
+ if (!_mesa_initialize_context(&intel->ctx, api, mesaVis, shareCtx,
+ functions, (void *) intel)) {
+ printf("%s: failed to init mesa context\n", __FUNCTION__);
+ return false;
+ }
+
+ driContextPriv->driverPrivate = intel;
+ intel->intelScreen = intelScreen;
+ intel->driContext = driContextPriv;
+ intel->driFd = sPriv->fd;
+
+ intel->gen = intelScreen->gen;
+
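+ /* The GT level distinguishes the graphics configurations of gen6+
+ * parts; 0 means the distinction does not apply.
+ */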
+ const int devID = intelScreen->deviceID;
+ if (IS_SNB_GT1(devID) || IS_IVB_GT1(devID) || IS_HSW_GT1(devID))
+ intel->gt = 1;
+ else if (IS_SNB_GT2(devID) || IS_IVB_GT2(devID) || IS_HSW_GT2(devID))
+ intel->gt = 2;
+ else
+ intel->gt = 0;
+
+ if (IS_HASWELL(devID)) {
+ intel->is_haswell = true;
+ } else if (IS_G4X(devID)) {
+ intel->is_g4x = true;
+ } else if (IS_945(devID)) {
+ intel->is_945 = true;
+ }
+
+ if (intel->gen >= 5) {
+ intel->needs_ff_sync = true;
+ }
+
+ intel->has_separate_stencil = intel->intelScreen->hw_has_separate_stencil;
+ intel->must_use_separate_stencil = intel->intelScreen->hw_must_use_separate_stencil;
+ intel->has_hiz = intel->gen >= 6 && !intel->is_haswell;
+ intel->has_llc = intel->intelScreen->hw_has_llc;
+ intel->has_swizzling = intel->intelScreen->hw_has_swizzling;
+
+ memset(&ctx->TextureFormatSupported,
+ 0, sizeof(ctx->TextureFormatSupported));
+
+ driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
+ sPriv->myNum, (intel->gen >= 4) ? "i965" : "i915");
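+ /* Pre-gen4 batches are kept to 4KB; gen4+ uses the full size of the
+ * mapped batch buffer.
+ */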
+ if (intel->gen < 4)
+ intel->maxBatchSize = 4096;
+ else
+ intel->maxBatchSize = sizeof(intel->batch.map);
+
+ intel->bufmgr = intelScreen->bufmgr;
+
+ bo_reuse_mode = driQueryOptioni(&intel->optionCache, "bo_reuse");
+ switch (bo_reuse_mode) {
+ case DRI_CONF_BO_REUSE_DISABLED:
+ break;
+ case DRI_CONF_BO_REUSE_ALL:
+ intel_bufmgr_gem_enable_reuse(intel->bufmgr);
+ break;
+ }
+
+ ctx->Const.MinLineWidth = 1.0;
+ ctx->Const.MinLineWidthAA = 1.0;
+ ctx->Const.MaxLineWidth = 5.0;
+ ctx->Const.MaxLineWidthAA = 5.0;
+ ctx->Const.LineWidthGranularity = 0.5;
+
+ ctx->Const.MinPointSize = 1.0;
+ ctx->Const.MinPointSizeAA = 1.0;
+ ctx->Const.MaxPointSize = 255.0;
+ ctx->Const.MaxPointSizeAA = 3.0;
+ ctx->Const.PointSizeGranularity = 1.0;
+
+ ctx->Const.MaxSamples = 1;
+
+ if (intel->gen >= 6)
+ ctx->Const.MaxClipPlanes = 8;
+
+ ctx->Const.StripTextureBorder = GL_TRUE;
+
+ /* Reinitialize the context point state. It depends on constants in
+ * __struct gl_contextRec::Const.
+ */
+ _mesa_init_point(ctx);
+
+ if (intel->gen >= 4) {
+ ctx->Const.MaxRenderbufferSize = 8192;
+ } else {
+ ctx->Const.MaxRenderbufferSize = 2048;
+ }
+
+ /* Initialize the software rasterizer and helper modules.
+ *
+ * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
+ * software fallbacks (which we have to support on legacy GL to do weird
+ * glDrawPixels(), glBitmap(), and other functions).
+ */
+ if (intel->gen <= 3 || api != API_OPENGL_CORE) {
+ _swrast_CreateContext(ctx);
+ }
+
+ _vbo_CreateContext(ctx);
+ if (ctx->swrast_context) {
+ _tnl_CreateContext(ctx);
+ _swsetup_CreateContext(ctx);
+
+ /* Configure swrast to match hardware characteristics: */
+ _swrast_allow_pixel_fog(ctx, false);
+ _swrast_allow_vertex_fog(ctx, true);
+ }
+
+ _mesa_meta_init(ctx);
+
+ intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
+ intel->hw_stipple = 1;
+
+ /* XXX FBO: this doesn't seem to be used anywhere */
+ switch (mesaVis->depthBits) {
+ case 0: /* what to do in this case? */
+ case 16:
+ intel->polygon_offset_scale = 1.0;
+ break;
+ case 24:
+ intel->polygon_offset_scale = 2.0; /* req'd to pass glean */
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ if (intel->gen >= 4)
+ intel->polygon_offset_scale /= 0xffff;
+
+ intel->RenderIndex = ~0;
+
+ intelInitExtensions(ctx);
+
+ INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
+ if (INTEL_DEBUG & DEBUG_BUFMGR)
+ dri_bufmgr_set_debug(intel->bufmgr, true);
+
+ if (INTEL_DEBUG & DEBUG_AUB)
+ drm_intel_bufmgr_gem_set_aub_dump(intel->bufmgr, true);
+
+ intel_batchbuffer_init(intel);
+
+ intel_fbo_init(intel);
+
+ intel->use_texture_tiling = driQueryOptionb(&intel->optionCache,
+ "texture_tiling");
+ intel->use_early_z = driQueryOptionb(&intel->optionCache, "early_z");
+
+ if (!driQueryOptionb(&intel->optionCache, "hiz")) {
+ intel->has_hiz = false;
+ /* On gen6, you can only do separate stencil with HIZ. */
+ if (intel->gen == 6)
+ intel->has_separate_stencil = false;
+ }
+
+ intel->prim.primitive = ~0;
+
+ /* Force all software fallbacks */
+#ifdef I915
+ if (driQueryOptionb(&intel->optionCache, "no_rast")) {
+ fprintf(stderr, "disabling 3D rasterization\n");
+ intel->no_rast = 1;
+ }
+#endif
+
+ if (driQueryOptionb(&intel->optionCache, "always_flush_batch")) {
+ fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
+ intel->always_flush_batch = 1;
+ }
+
+ if (driQueryOptionb(&intel->optionCache, "always_flush_cache")) {
+ fprintf(stderr, "flushing GPU caches before/after each draw call\n");
+ intel->always_flush_cache = 1;
+ }
+
+ return true;
+}
+
+void
+intelDestroyContext(__DRIcontext * driContextPriv)
+{
+ struct intel_context *intel =
+ (struct intel_context *) driContextPriv->driverPrivate;
+ struct gl_context *ctx = &intel->ctx;
+
+ assert(intel); /* should never be null */
+ if (intel) {
+ INTEL_FIREVERTICES(intel);
+
+ /* Dump a final BMP in case the application doesn't call SwapBuffers */
+ if (INTEL_DEBUG & DEBUG_AUB) {
+ intel_batchbuffer_flush(intel);
+ aub_dump_bmp(&intel->ctx);
+ }
+
+ _mesa_meta_free(&intel->ctx);
+
+ intel->vtbl.destroy(intel);
+
+ if (ctx->swrast_context) {
+ _swsetup_DestroyContext(&intel->ctx);
+ _tnl_DestroyContext(&intel->ctx);
+ }
+ _vbo_DestroyContext(&intel->ctx);
+
+ if (ctx->swrast_context)
+ _swrast_DestroyContext(&intel->ctx);
+ intel->Fallback = 0x0; /* don't call _swrast_Flush later */
+
+ intel_batchbuffer_free(intel);
+
+ free(intel->prim.vb);
+ intel->prim.vb = NULL;
+ drm_intel_bo_unreference(intel->prim.vb_bo);
+ intel->prim.vb_bo = NULL;
+ drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
+ intel->first_post_swapbuffers_batch = NULL;
+
+ driDestroyOptionCache(&intel->optionCache);
+
+ /* free the Mesa context */
+ _mesa_free_context_data(&intel->ctx);
+
+ _math_matrix_dtr(&intel->ViewportMatrix);
+
+ ralloc_free(intel);
+ driContextPriv->driverPrivate = NULL;
+ }
+}
+
+GLboolean
+intelUnbindContext(__DRIcontext * driContextPriv)
+{
+ /* Unset current context and dispatch table */
+ _mesa_make_current(NULL, NULL, NULL);
+
+ return true;
+}
+
+GLboolean
+intelMakeCurrent(__DRIcontext * driContextPriv,
+ __DRIdrawable * driDrawPriv,
+ __DRIdrawable * driReadPriv)
+{
+ struct intel_context *intel;
+ GET_CURRENT_CONTEXT(curCtx);
+
+ if (driContextPriv)
+ intel = (struct intel_context *) driContextPriv->driverPrivate;
+ else
+ intel = NULL;
+
+ /* According to the glXMakeCurrent() man page: "Pending commands to
+ * the previous context, if any, are flushed before it is released."
+ * But only flush if we're actually changing contexts.
+ */
+ if (intel_context(curCtx) && intel_context(curCtx) != intel) {
+ _mesa_flush(curCtx);
+ }
+
+ if (driContextPriv) {
+ struct gl_framebuffer *fb, *readFb;
+
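+ /* A context may be bound with no drawables (e.g. GLX 1.3's
+ * glXMakeContextCurrent() with None); fall back to Mesa's dummy
+ * incomplete framebuffer in that case.
+ */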
+ if (driDrawPriv == NULL && driReadPriv == NULL) {
+ fb = _mesa_get_incomplete_framebuffer();
+ readFb = _mesa_get_incomplete_framebuffer();
+ } else {
+ fb = driDrawPriv->driverPrivate;
+ readFb = driReadPriv->driverPrivate;
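+ /* Set the context's stamps to one behind the drawables' current
+ * stamps so that the first intel_prepare_render() sees the buffers
+ * as stale and refetches them.
+ */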
+ driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
+ driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
+ }
+
+ intel_prepare_render(intel);
+ _mesa_make_current(&intel->ctx, fb, readFb);
+
+ /* We do this in intel_prepare_render() too, but intel->ctx.DrawBuffer
+ * is NULL at that point. We can't call _mesa_make_current()
+ * first, since we need the buffer size for the initial
+ * viewport. So just call intel_draw_buffer() again here. */
+ intel_draw_buffer(&intel->ctx);
+ }
+ else {
+ _mesa_make_current(NULL, NULL, NULL);
+ }
+
+ return true;
+}
+
+/**
+ * \brief Query DRI2 to obtain a DRIdrawable's buffers.
+ *
+ * To determine which DRI buffers to request, examine the renderbuffers
+ * attached to the drawable's framebuffer. Then request the buffers with
+ * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
+ *
+ * This is called from intel_update_renderbuffers().
+ *
+ * \param drawable Drawable whose buffers are queried.
+ * \param buffers [out] List of buffers returned by DRI2 query.
+ * \param buffer_count [out] Number of buffers returned.
+ *
+ * \see intel_update_renderbuffers()
+ * \see DRI2GetBuffers()
+ * \see DRI2GetBuffersWithFormat()
+ */
+static void
+intel_query_dri2_buffers(struct intel_context *intel,
+ __DRIdrawable *drawable,
+ __DRIbuffer **buffers,
+ int *buffer_count)
+{
+ __DRIscreen *screen = intel->intelScreen->driScrnPriv;
+ struct gl_framebuffer *fb = drawable->driverPrivate;
+ int i = 0;
+ const int max_attachments = 4;
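+ /* attachments[] is filled with (attachment token, bits-per-pixel)
+ * pairs, hence the 2 * max_attachments entries and the i / 2 count
+ * passed to the loader below.
+ */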
+ unsigned *attachments = calloc(2 * max_attachments, sizeof(unsigned));
+
+ struct intel_renderbuffer *front_rb;
+ struct intel_renderbuffer *back_rb;
+
+ front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
+ back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
+
+ if ((intel->is_front_buffer_rendering ||
+ intel->is_front_buffer_reading ||
+ !back_rb) && front_rb) {
+ attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
+ attachments[i++] = intel_bits_per_pixel(front_rb);
+ }
+
+ if (back_rb) {
+ attachments[i++] = __DRI_BUFFER_BACK_LEFT;
+ attachments[i++] = intel_bits_per_pixel(back_rb);
+ }
+
+ assert(i <= 2 * max_attachments);
+
+ *buffers = screen->dri2.loader->getBuffersWithFormat(drawable,
+ &drawable->w,
+ &drawable->h,
+ attachments, i / 2,
+ buffer_count,
+ drawable->loaderPrivate);
+ free(attachments);
+}
+
+/**
+ * \brief Assign a DRI buffer's DRM region to a renderbuffer.
+ *
+ * This is called from intel_update_renderbuffers().
+ *
+ * \par Note:
+ * DRI buffers whose attachment point is DRI2BufferStencil or
+ * DRI2BufferDepthStencil are handled as special cases.
+ *
+ * \param buffer_name is a human readable name, such as "dri2 front buffer",
+ * that is passed to intel_region_alloc_for_handle().
+ *
+ * \see intel_update_renderbuffers()
+ * \see intel_region_alloc_for_handle()
+ */
+static void
+intel_process_dri2_buffer(struct intel_context *intel,
+ __DRIdrawable *drawable,
+ __DRIbuffer *buffer,
+ struct intel_renderbuffer *rb,
+ const char *buffer_name)
+{
+ struct intel_region *region = NULL;
+
+ if (!rb)
+ return;
+
+ unsigned num_samples = rb->Base.Base.NumSamples;
+
+ /* We try to avoid closing and reopening the same BO name, because the first
+ * use of a mapping of the buffer involves a bunch of page faulting which is
+ * moderately expensive.
+ */
+ if (num_samples == 0) {
+ if (rb->mt &&
+ rb->mt->region &&
+ rb->mt->region->name == buffer->name)
+ return;
+ } else {
+ if (rb->mt &&
+ rb->mt->singlesample_mt &&
+ rb->mt->singlesample_mt->region &&
+ rb->mt->singlesample_mt->region->name == buffer->name)
+ return;
+ }
+
+ if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
+ fprintf(stderr,
+ "attaching buffer %d, at %d, cpp %d, pitch %d\n",
+ buffer->name, buffer->attachment,
+ buffer->cpp, buffer->pitch);
+ }
+
+ intel_miptree_release(&rb->mt);
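+ /* The DRI2 buffer pitch is in bytes, while the region pitch is counted
+ * in pixels, hence the division by cpp.
+ */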
+ region = intel_region_alloc_for_handle(intel->intelScreen,
+ buffer->cpp,
+ drawable->w,
+ drawable->h,
+ buffer->pitch / buffer->cpp,
+ buffer->name,
+ buffer_name);
+ if (!region)
+ return;
+
+ rb->mt = intel_miptree_create_for_dri2_buffer(intel,
+ buffer->attachment,
+ intel_rb_format(rb),
+ num_samples,
+ region);
+ intel_region_release(&region);
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_extensions.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_extensions.c
index a2f3e8cd208..885e8a43e80 120000..100755
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_extensions.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_extensions.c
@@ -1 +1,197 @@
-../intel/intel_extensions.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/mfeatures.h"
+#include "main/version.h"
+
+#include "intel_chipset.h"
+#include "intel_context.h"
+#include "intel_extensions.h"
+#include "intel_reg.h"
+#include "utils.h"
+
+/**
+ * Enables the extensions supported by the hardware and driver for the
+ * given context.
+ */
+void
+intelInitExtensions(struct gl_context *ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+
+ ctx->Extensions.ARB_draw_elements_base_vertex = true;
+ ctx->Extensions.ARB_explicit_attrib_location = true;
+ if (_mesa_is_desktop_gl(ctx))
+ ctx->Extensions.ARB_framebuffer_object = true;
+ ctx->Extensions.ARB_half_float_pixel = true;
+ ctx->Extensions.ARB_map_buffer_range = true;
+ ctx->Extensions.ARB_point_sprite = true;
+ ctx->Extensions.ARB_shader_objects = true;
+ ctx->Extensions.ARB_shading_language_100 = true;
+ ctx->Extensions.ARB_sync = true;
+ ctx->Extensions.ARB_texture_border_clamp = true;
+ ctx->Extensions.ARB_texture_cube_map = true;
+ ctx->Extensions.ARB_texture_env_combine = true;
+ ctx->Extensions.ARB_texture_env_crossbar = true;
+ ctx->Extensions.ARB_texture_env_dot3 = true;
+ ctx->Extensions.ARB_texture_storage = true;
+ ctx->Extensions.ARB_vertex_program = true;
+ ctx->Extensions.ARB_vertex_shader = true;
+ ctx->Extensions.EXT_blend_color = true;
+ ctx->Extensions.EXT_blend_equation_separate = true;
+ ctx->Extensions.EXT_blend_func_separate = true;
+ ctx->Extensions.EXT_blend_minmax = true;
+ ctx->Extensions.EXT_framebuffer_blit = true;
+ ctx->Extensions.EXT_framebuffer_object = true;
+ ctx->Extensions.EXT_framebuffer_multisample = true;
+ ctx->Extensions.EXT_fog_coord = true;
+ ctx->Extensions.EXT_gpu_program_parameters = true;
+ ctx->Extensions.EXT_packed_depth_stencil = true;
+ ctx->Extensions.EXT_pixel_buffer_object = true;
+ ctx->Extensions.EXT_point_parameters = true;
+ ctx->Extensions.EXT_provoking_vertex = true;
+ ctx->Extensions.EXT_secondary_color = true;
+ ctx->Extensions.EXT_separate_shader_objects = true;
+ ctx->Extensions.EXT_texture_env_dot3 = true;
+ ctx->Extensions.EXT_texture_filter_anisotropic = true;
+ ctx->Extensions.APPLE_object_purgeable = true;
+ ctx->Extensions.MESA_pack_invert = true;
+ ctx->Extensions.MESA_ycbcr_texture = true;
+ ctx->Extensions.NV_blend_square = true;
+ ctx->Extensions.NV_texture_rectangle = true;
+ ctx->Extensions.NV_vertex_program = true;
+ ctx->Extensions.NV_vertex_program1_1 = true;
+ ctx->Extensions.TDFX_texture_compression_FXT1 = true;
+#if FEATURE_OES_EGL_image
+ ctx->Extensions.OES_EGL_image = true;
+#endif
+ ctx->Extensions.OES_draw_texture = true;
+ ctx->Extensions.OES_compressed_ETC1_RGB8_texture = true;
+ ctx->Extensions.ARB_texture_rgb10_a2ui = true;
+
+ if (intel->gen >= 6) {
+ if (ctx->API == API_OPENGL_CORE) {
+ ctx->Const.GLSLVersion = 140;
+ } else {
+ ctx->Const.GLSLVersion = 130;
+ }
+ } else {
+ ctx->Const.GLSLVersion = 120;
+ }
+ _mesa_override_glsl_version(ctx);
+
+ if (intel->gen == 6 ||
+ (intel->gen == 7 && intel->intelScreen->kernel_has_gen7_sol_reset))
+ ctx->Extensions.EXT_transform_feedback = true;
+
+ if (intel->gen >= 6) {
+ ctx->Extensions.ARB_blend_func_extended = !driQueryOptionb(&intel->optionCache, "disable_blend_func_extended");
+ ctx->Extensions.ARB_draw_buffers_blend = true;
+ ctx->Extensions.ARB_uniform_buffer_object = true;
+
+ if (ctx->API == API_OPENGL_CORE) {
+ ctx->Extensions.ARB_texture_buffer_object = true;
+ }
+ }
+
+ if (intel->gen >= 5)
+ ctx->Extensions.EXT_timer_query = true;
+
+ if (intel->gen >= 6) {
+ uint64_t dummy;
+ /* Test if the kernel has the ioctl. */
+ if (drm_intel_reg_read(intel->bufmgr, TIMESTAMP, &dummy) == 0)
+ ctx->Extensions.ARB_timer_query = true;
+ }
+
+ if (intel->gen >= 4) {
+ ctx->Extensions.ARB_color_buffer_float = true;
+ ctx->Extensions.ARB_depth_buffer_float = true;
+ ctx->Extensions.ARB_depth_clamp = true;
+ ctx->Extensions.ARB_draw_instanced = true;
+ ctx->Extensions.ARB_instanced_arrays = true;
+ ctx->Extensions.ARB_fragment_coord_conventions = true;
+ ctx->Extensions.ARB_fragment_program_shadow = true;
+ ctx->Extensions.ARB_fragment_shader = true;
+ ctx->Extensions.ARB_half_float_vertex = true;
+ ctx->Extensions.ARB_occlusion_query = true;
+ ctx->Extensions.ARB_occlusion_query2 = true;
+ ctx->Extensions.ARB_point_sprite = true;
+ ctx->Extensions.ARB_seamless_cube_map = true;
+ ctx->Extensions.ARB_shader_bit_encoding = true;
+ ctx->Extensions.ARB_shader_texture_lod = true;
+#ifdef TEXTURE_FLOAT_ENABLED
+ ctx->Extensions.ARB_texture_float = true;
+ ctx->Extensions.EXT_texture_shared_exponent = true;
+ ctx->Extensions.EXT_packed_float = true;
+#endif
+ ctx->Extensions.ARB_texture_compression_rgtc = true;
+ ctx->Extensions.ARB_texture_rg = true;
+ ctx->Extensions.EXT_draw_buffers2 = true;
+ ctx->Extensions.EXT_framebuffer_sRGB = true;
+ ctx->Extensions.EXT_texture_array = true;
+ ctx->Extensions.EXT_texture_integer = true;
+ ctx->Extensions.EXT_texture_snorm = true;
+ ctx->Extensions.EXT_texture_sRGB = true;
+ ctx->Extensions.EXT_texture_sRGB_decode = true;
+ ctx->Extensions.EXT_texture_swizzle = true;
+ ctx->Extensions.EXT_vertex_array_bgra = true;
+ ctx->Extensions.ATI_envmap_bumpmap = true;
+ ctx->Extensions.MESA_texture_array = true;
+ ctx->Extensions.NV_conditional_render = true;
+ }
+
+ if (intel->gen >= 3) {
+ ctx->Extensions.ARB_ES2_compatibility = true;
+ ctx->Extensions.ARB_depth_texture = true;
+ ctx->Extensions.ARB_fragment_program = true;
+ ctx->Extensions.ARB_shadow = true;
+ ctx->Extensions.ARB_texture_non_power_of_two = true;
+ ctx->Extensions.EXT_shadow_funcs = true;
+ ctx->Extensions.EXT_stencil_two_side = true;
+ ctx->Extensions.ATI_separate_stencil = true;
+ ctx->Extensions.ATI_texture_env_combine3 = true;
+ ctx->Extensions.NV_texture_env_combine4 = true;
+
+ if (driQueryOptionb(&intel->optionCache, "fragment_shader"))
+ ctx->Extensions.ARB_fragment_shader = true;
+
+ if (driQueryOptionb(&intel->optionCache, "stub_occlusion_query"))
+ ctx->Extensions.ARB_occlusion_query = true;
+ }
+
+ if (intel->ctx.Mesa_DXTn) {
+ ctx->Extensions.EXT_texture_compression_s3tc = true;
+ ctx->Extensions.S3_s3tc = true;
+ }
+ else if (driQueryOptionb(&intel->optionCache, "force_s3tc_enable")) {
+ ctx->Extensions.EXT_texture_compression_s3tc = true;
+ }
+
+ if (intel->gen >= 4) {
+ ctx->Extensions.NV_primitive_restart = true;
+ }
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_fbo.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_fbo.c
index a19f86dcc57..65494a2d506 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_fbo.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_fbo.c
@@ -1 +1,964 @@
-../intel/intel_fbo.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "main/enums.h"
+#include "main/imports.h"
+#include "main/macros.h"
+#include "main/mfeatures.h"
+#include "main/mtypes.h"
+#include "main/fbobject.h"
+#include "main/framebuffer.h"
+#include "main/renderbuffer.h"
+#include "main/context.h"
+#include "main/teximage.h"
+#include "main/image.h"
+
+#include "swrast/swrast.h"
+#include "drivers/common/meta.h"
+
+#include "intel_context.h"
+#include "intel_batchbuffer.h"
+#include "intel_buffers.h"
+#include "intel_blit.h"
+#include "intel_fbo.h"
+#include "intel_mipmap_tree.h"
+#include "intel_regions.h"
+#include "intel_tex.h"
+#include "intel_span.h"
+#ifndef I915
+#include "brw_context.h"
+#endif
+
+#define FILE_DEBUG_FLAG DEBUG_FBO
+
+static struct gl_renderbuffer *
+intel_new_renderbuffer(struct gl_context * ctx, GLuint name);
+
+struct intel_region*
+intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
+{
+ struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex);
+ if (irb && irb->mt) {
+ if (attIndex == BUFFER_STENCIL && irb->mt->stencil_mt)
+ return irb->mt->stencil_mt->region;
+ else
+ return irb->mt->region;
+ } else
+ return NULL;
+}
+
+/**
+ * Create a new framebuffer object.
+ */
+static struct gl_framebuffer *
+intel_new_framebuffer(struct gl_context * ctx, GLuint name)
+{
+ /* Only drawable state in intel_framebuffer at this time, just use Mesa's
+ * class
+ */
+ return _mesa_new_framebuffer(ctx, name);
+}
+
+
+/** Called by gl_renderbuffer::Delete() */
+static void
+intel_delete_renderbuffer(struct gl_context *ctx, struct gl_renderbuffer *rb)
+{
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+
+ ASSERT(irb);
+
+ intel_miptree_release(&irb->mt);
+
+ _mesa_delete_renderbuffer(ctx, rb);
+}
+
+/**
+ * \see dd_function_table::MapRenderbuffer
+ */
+static void
+intel_map_renderbuffer(struct gl_context *ctx,
+ struct gl_renderbuffer *rb,
+ GLuint x, GLuint y, GLuint w, GLuint h,
+ GLbitfield mode,
+ GLubyte **out_map,
+ GLint *out_stride)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+ void *map;
+ int stride;
+
+ if (srb->Buffer) {
+ /* this is a malloc'd renderbuffer (accum buffer), not an irb */
+ GLint bpp = _mesa_get_format_bytes(rb->Format);
+ GLint rowStride = srb->RowStride;
+ *out_map = (GLubyte *) srb->Buffer + y * rowStride + x * bpp;
+ *out_stride = rowStride;
+ return;
+ }
+
+ /* We sometimes get called with this by our intel_span.c usage. */
+ if (!irb->mt) {
+ *out_map = NULL;
+ *out_stride = 0;
+ return;
+ }
+
+ /* For a window-system renderbuffer, we need to flip the mapping we receive
+ * upside-down. So we need to ask for a rectangle flipped vertically, and
+ * we then return a pointer to the bottom of it with a negative stride.
+ */
+ if (rb->Name == 0) {
+ y = rb->Height - y - h;
+ }
+
+ intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer,
+ x, y, w, h, mode, &map, &stride);
+
+ if (rb->Name == 0) {
+ map += (h - 1) * stride;
+ stride = -stride;
+ }
+
+ DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
+ __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
+ x, y, w, h, map, stride);
+
+ *out_map = map;
+ *out_stride = stride;
+}
+
+/**
+ * \see dd_function_table::UnmapRenderbuffer
+ */
+static void
+intel_unmap_renderbuffer(struct gl_context *ctx,
+ struct gl_renderbuffer *rb)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+
+ DBG("%s: rb %d (%s)\n", __FUNCTION__,
+ rb->Name, _mesa_get_format_name(rb->Format));
+
+ if (srb->Buffer) {
+ /* this is a malloc'd renderbuffer (accum buffer) */
+ /* nothing to do */
+ return;
+ }
+
+ intel_miptree_unmap(intel, irb->mt, irb->mt_level, irb->mt_layer);
+}
+
+
+/**
+ * Round up the requested multisample count to the next supported sample size.
+ */
+unsigned
+intel_quantize_num_samples(struct intel_screen *intel, unsigned num_samples)
+{
+ switch (intel->gen) {
+ case 6:
+ /* Gen6 supports only 4x multisampling. */
+ if (num_samples > 0)
+ return 4;
+ else
+ return 0;
+ case 7:
+ /* Gen7 supports 4x and 8x multisampling. */
+ if (num_samples > 4)
+ return 8;
+ else if (num_samples > 0)
+ return 4;
+ else
+ return 0;
+ default:
+ /* MSAA unsupported. However, a careful reading of
+ * EXT_framebuffer_multisample reveals that we need to permit
+ * num_samples to be 1 (since num_samples is permitted to be as high as
+ * GL_MAX_SAMPLES, and GL_MAX_SAMPLES must be at least 1). Since
+ * platforms before Gen6 don't support MSAA, this is safe, because
+ * multisampling won't happen anyhow.
+ */
+ if (num_samples > 0)
+ return 1;
+ return 0;
+ }
+}
+
+
+/**
+ * Called via glRenderbufferStorageEXT() to set the format and allocate
+ * storage for a user-created renderbuffer.
+ */
+GLboolean
+intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
+ GLenum internalFormat,
+ GLuint width, GLuint height)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_screen *screen = intel->intelScreen;
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+ rb->NumSamples = intel_quantize_num_samples(screen, rb->NumSamples);
+
+ switch (internalFormat) {
+ default:
+ /* Use the same format-choice logic as for textures.
+ * Renderbuffers aren't any different from textures for us,
+ * except they're less useful because you can't texture with
+ * them.
+ */
+ rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, GL_TEXTURE_2D,
+ internalFormat,
+ GL_NONE, GL_NONE);
+ break;
+ case GL_STENCIL_INDEX:
+ case GL_STENCIL_INDEX1_EXT:
+ case GL_STENCIL_INDEX4_EXT:
+ case GL_STENCIL_INDEX8_EXT:
+ case GL_STENCIL_INDEX16_EXT:
+ /* These aren't actual texture formats, so force them here. */
+ if (intel->has_separate_stencil) {
+ rb->Format = MESA_FORMAT_S8;
+ } else {
+ assert(!intel->must_use_separate_stencil);
+ rb->Format = MESA_FORMAT_S8_Z24;
+ }
+ break;
+ }
+
+ rb->Width = width;
+ rb->Height = height;
+ rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);
+
+ intel_miptree_release(&irb->mt);
+
+ DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr(internalFormat),
+ _mesa_get_format_name(rb->Format), width, height);
+
+ if (width == 0 || height == 0)
+ return true;
+
+ irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format,
+ width, height,
+ rb->NumSamples);
+ if (!irb->mt)
+ return false;
+
+ return true;
+}
+
+
+#if FEATURE_OES_EGL_image
+static void
+intel_image_target_renderbuffer_storage(struct gl_context *ctx,
+ struct gl_renderbuffer *rb,
+ void *image_handle)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_renderbuffer *irb;
+ __DRIscreen *screen;
+ __DRIimage *image;
+
+ screen = intel->intelScreen->driScrnPriv;
+ image = screen->dri2.image->lookupEGLImage(screen, image_handle,
+ screen->loaderPrivate);
+ if (image == NULL)
+ return;
+
+ /* __DRIimage is opaque to the core so it has to be checked here */
+ switch (image->format) {
+ case MESA_FORMAT_RGBA8888_REV:
+ _mesa_error(&intel->ctx, GL_INVALID_OPERATION,
+ "glEGLImageTargetRenderbufferStorage(unsupported image format)");
+ return;
+ default:
+ break;
+ }
+
+ irb = intel_renderbuffer(rb);
+ intel_miptree_release(&irb->mt);
+ irb->mt = intel_miptree_create_for_region(intel,
+ GL_TEXTURE_2D,
+ image->format,
+ image->region);
+ if (!irb->mt)
+ return;
+
+ rb->InternalFormat = image->internal_format;
+ rb->Width = image->region->width;
+ rb->Height = image->region->height;
+ rb->Format = image->format;
+ rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
+ image->internal_format);
+}
+#endif
+
+/**
+ * Called for each hardware renderbuffer when a _window_ is resized.
+ * Just update fields.
+ * Not used for user-created renderbuffers!
+ */
+static GLboolean
+intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
+ GLenum internalFormat, GLuint width, GLuint height)
+{
+ ASSERT(rb->Name == 0);
+ rb->Width = width;
+ rb->Height = height;
+ rb->InternalFormat = internalFormat;
+
+ return true;
+}
+
+
+static void
+intel_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb,
+ GLuint width, GLuint height)
+{
+ int i;
+
+ _mesa_resize_framebuffer(ctx, fb, width, height);
+
+ fb->Initialized = true; /* XXX remove someday */
+
+ if (_mesa_is_user_fbo(fb)) {
+ return;
+ }
+
+
+ /* Make sure all window system renderbuffers are up to date */
+ for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) {
+ struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
+
+ /* only resize if size is changing */
+ if (rb && (rb->Width != width || rb->Height != height)) {
+ rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height);
+ }
+ }
+}
+
+
+/** Dummy function for gl_renderbuffer::AllocStorage() */
+static GLboolean
+intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
+ GLenum internalFormat, GLuint width, GLuint height)
+{
+ _mesa_problem(ctx, "intel_op_alloc_storage should never be called.");
+ return false;
+}
+
+/**
+ * Create a new intel_renderbuffer which corresponds to an on-screen window,
+ * not a user-created renderbuffer.
+ *
+ * \param num_samples must be quantized.
+ */
+struct intel_renderbuffer *
+intel_create_renderbuffer(gl_format format, unsigned num_samples)
+{
+ struct intel_renderbuffer *irb;
+ struct gl_renderbuffer *rb;
+
+ GET_CURRENT_CONTEXT(ctx);
+
+ irb = CALLOC_STRUCT(intel_renderbuffer);
+ if (!irb) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
+ return NULL;
+ }
+
+ rb = &irb->Base.Base;
+
+ _mesa_init_renderbuffer(rb, 0);
+ rb->ClassID = INTEL_RB_CLASS;
+ rb->_BaseFormat = _mesa_get_format_base_format(format);
+ rb->Format = format;
+ rb->InternalFormat = rb->_BaseFormat;
+ rb->NumSamples = num_samples;
+
+ /* intel-specific methods */
+ rb->Delete = intel_delete_renderbuffer;
+ rb->AllocStorage = intel_alloc_window_storage;
+
+ return irb;
+}
+
+/**
+ * Private window-system buffers (as opposed to ones shared with the display
+ * server created with intel_create_renderbuffer()) are most similar in their
+ * handling to user-created renderbuffers, but they have a resize handler that
+ * may be called at intel_update_renderbuffers() time.
+ *
+ * \param num_samples must be quantized.
+ */
+struct intel_renderbuffer *
+intel_create_private_renderbuffer(gl_format format, unsigned num_samples)
+{
+ struct intel_renderbuffer *irb;
+
+ irb = intel_create_renderbuffer(format, num_samples);
+ irb->Base.Base.AllocStorage = intel_alloc_renderbuffer_storage;
+
+ return irb;
+}
+
+/**
+ * Create a new renderbuffer object.
+ * Typically called via glBindRenderbufferEXT().
+ */
+static struct gl_renderbuffer *
+intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
+{
+ /*struct intel_context *intel = intel_context(ctx); */
+ struct intel_renderbuffer *irb;
+ struct gl_renderbuffer *rb;
+
+ irb = CALLOC_STRUCT(intel_renderbuffer);
+ if (!irb) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
+ return NULL;
+ }
+
+ rb = &irb->Base.Base;
+
+ _mesa_init_renderbuffer(rb, name);
+ rb->ClassID = INTEL_RB_CLASS;
+
+ /* intel-specific methods */
+ rb->Delete = intel_delete_renderbuffer;
+ rb->AllocStorage = intel_alloc_renderbuffer_storage;
+ /* span routines set in alloc_storage function */
+
+ return rb;
+}
+
+
+/**
+ * Called via glBindFramebufferEXT().
+ */
+static void
+intel_bind_framebuffer(struct gl_context * ctx, GLenum target,
+ struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
+{
+ if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
+ intel_draw_buffer(ctx);
+ }
+ else {
+ /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
+ }
+}
+
+
+/**
+ * Called via glFramebufferRenderbufferEXT().
+ */
+static void
+intel_framebuffer_renderbuffer(struct gl_context * ctx,
+ struct gl_framebuffer *fb,
+ GLenum attachment, struct gl_renderbuffer *rb)
+{
+ DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);
+
+ _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
+ intel_draw_buffer(ctx);
+}
+
+/**
+ * Set up a renderbuffer to wrap a texture image so that it can be
+ * rendered to.
+ *
+ * @return true on success
+ */
+
+static bool
+intel_renderbuffer_update_wrapper(struct intel_context *intel,
+ struct intel_renderbuffer *irb,
+ struct gl_texture_image *image,
+ uint32_t layer)
+{
+ struct gl_renderbuffer *rb = &irb->Base.Base;
+ struct intel_texture_image *intel_image = intel_texture_image(image);
+ struct intel_mipmap_tree *mt = intel_image->mt;
+ int level = image->Level;
+
+ rb->Format = image->TexFormat;
+ rb->InternalFormat = image->InternalFormat;
+ rb->_BaseFormat = image->_BaseFormat;
+ rb->Width = mt->level[level].width;
+ rb->Height = mt->level[level].height;
+
+ rb->Delete = intel_delete_renderbuffer;
+ rb->AllocStorage = intel_nop_alloc_storage;
+
+ intel_miptree_check_level_layer(mt, level, layer);
+ irb->mt_level = level;
+ irb->mt_layer = layer;
+
+ intel_miptree_reference(&irb->mt, mt);
+
+ intel_renderbuffer_set_draw_offset(irb);
+
+ if (mt->hiz_mt == NULL &&
+ intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
+ intel_miptree_alloc_hiz(intel, mt, 0 /* num_samples */);
+ if (!mt->hiz_mt)
+ return false;
+ }
+
+ return true;
+}
+
+void
+intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
+{
+ unsigned int dst_x, dst_y;
+
+ /* compute offset of the particular 2D image within the texture region */
+ intel_miptree_get_image_offset(irb->mt,
+ irb->mt_level,
+ 0, /* face, which we ignore */
+ irb->mt_layer,
+ &dst_x, &dst_y);
+
+ irb->draw_x = dst_x;
+ irb->draw_y = dst_y;
+}
+
+/**
+ * Rendering to tiled buffers requires that the base address of the
+ * buffer be aligned to a page boundary. We generally render to
+ * textures by pointing the surface at the mipmap image level, which
+ * may not be aligned to a tile boundary.
+ *
+ * This function returns an appropriately-aligned base offset
+ * according to the tiling restrictions, plus any required x/y offset
+ * from there.
+ */
+uint32_t
+intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb,
+ uint32_t *tile_x,
+ uint32_t *tile_y)
+{
+ struct intel_region *region = irb->mt->region;
+ uint32_t mask_x, mask_y;
+
+ intel_region_get_tile_masks(region, &mask_x, &mask_y, false);
+
+ *tile_x = irb->draw_x & mask_x;
+ *tile_y = irb->draw_y & mask_y;
+ return intel_region_get_aligned_offset(region, irb->draw_x & ~mask_x,
+ irb->draw_y & ~mask_y, false);
+}
+
+/**
+ * Called by glFramebufferTexture[123]DEXT() (and other places) to
+ * prepare for rendering into texture memory. This might be called
+ * many times to choose different texture levels, cube faces, etc
+ * before intel_finish_render_texture() is ever called.
+ */
+static void
+intel_render_texture(struct gl_context * ctx,
+ struct gl_framebuffer *fb,
+ struct gl_renderbuffer_attachment *att)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct gl_texture_image *image = _mesa_get_attachment_teximage(att);
+ struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
+ struct intel_texture_image *intel_image = intel_texture_image(image);
+ struct intel_mipmap_tree *mt = intel_image->mt;
+ int layer;
+
+ (void) fb;
+
+ if (att->CubeMapFace > 0) {
+ assert(att->Zoffset == 0);
+ layer = att->CubeMapFace;
+ } else {
+ layer = att->Zoffset;
+ }
+
+ if (!intel_image->mt) {
+ /* Fall back to drawing to a texture that doesn't have a miptree
+ * (has a border, width/height 0, etc.)
+ */
+ _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
+ _swrast_render_texture(ctx, fb, att);
+ return;
+ }
+ else if (!irb) {
+ intel_miptree_check_level_layer(mt, att->TextureLevel, layer);
+
+ irb = (struct intel_renderbuffer *)intel_new_renderbuffer(ctx, ~0);
+
+ if (irb) {
+ /* bind the wrapper to the attachment point */
+ _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base.Base);
+ }
+ else {
+ /* fallback to software rendering */
+ _swrast_render_texture(ctx, fb, att);
+ return;
+ }
+ }
+
+ if (!intel_renderbuffer_update_wrapper(intel, irb, image, layer)) {
+ _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
+ _swrast_render_texture(ctx, fb, att);
+ return;
+ }
+
+ irb->tex_image = image;
+
+ DBG("Begin render %s texture tex=%u w=%d h=%d refcount=%d\n",
+ _mesa_get_format_name(image->TexFormat),
+ att->Texture->Name, image->Width, image->Height,
+ irb->Base.Base.RefCount);
+
+ /* update drawing region, etc */
+ intel_draw_buffer(ctx);
+}
+
+
+/**
+ * Called by Mesa when rendering to a texture is done.
+ */
+static void
+intel_finish_render_texture(struct gl_context * ctx,
+ struct gl_renderbuffer_attachment *att)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct gl_texture_object *tex_obj = att->Texture;
+ struct gl_texture_image *image =
+ tex_obj->Image[att->CubeMapFace][att->TextureLevel];
+ struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
+
+ DBG("Finish render %s texture tex=%u\n",
+ _mesa_get_format_name(image->TexFormat), att->Texture->Name);
+
+ if (irb)
+ irb->tex_image = NULL;
+
+ /* Since we've (probably) rendered to the texture and will (likely) use
+ * it in the texture domain later on in this batchbuffer, flush the
+ * batch. Once again, we wish for a domain tracker in libdrm to cover
+ * usage inside of a batchbuffer like GEM does in the kernel.
+ */
+ intel_batchbuffer_emit_mi_flush(intel);
+}
+
+/**
+ * Do additional "completeness" testing of a framebuffer object.
+ */
+static void
+intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
+{
+ struct intel_context *intel = intel_context(ctx);
+ const struct intel_renderbuffer *depthRb =
+ intel_get_renderbuffer(fb, BUFFER_DEPTH);
+ const struct intel_renderbuffer *stencilRb =
+ intel_get_renderbuffer(fb, BUFFER_STENCIL);
+ struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL;
+ int i;
+
+ DBG("%s() on fb %p (%s)\n", __FUNCTION__,
+ fb, (fb == ctx->DrawBuffer ? "drawbuffer" :
+ (fb == ctx->ReadBuffer ? "readbuffer" : "other buffer")));
+
+ if (depthRb)
+ depth_mt = depthRb->mt;
+ if (stencilRb) {
+ stencil_mt = stencilRb->mt;
+ if (stencil_mt->stencil_mt)
+ stencil_mt = stencil_mt->stencil_mt;
+ }
+
+ if (depth_mt && stencil_mt) {
+ if (depth_mt == stencil_mt) {
+ /* For true packed depth/stencil (not faked on prefers-separate-stencil
+ * hardware) we need to be sure they're the same level/layer, since
+ * we'll be emitting a single packet describing the packed setup.
+ */
+ if (depthRb->mt_level != stencilRb->mt_level ||
+ depthRb->mt_layer != stencilRb->mt_layer) {
+ DBG("depth image level/layer %d/%d != stencil image %d/%d\n",
+ depthRb->mt_level,
+ depthRb->mt_layer,
+ stencilRb->mt_level,
+ stencilRb->mt_layer);
+ fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ }
+ } else {
+ if (!intel->has_separate_stencil) {
+ DBG("separate stencil unsupported\n");
+ fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ }
+ if (stencil_mt->format != MESA_FORMAT_S8) {
+ DBG("separate stencil is %s instead of S8\n",
+ _mesa_get_format_name(stencil_mt->format));
+ fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ }
+ if (intel->gen < 7 && depth_mt->hiz_mt == NULL) {
+ /* Before Gen7, separate depth and stencil buffers can be used
+ * only if HiZ is enabled. From the Sandybridge PRM, Volume 2,
+ * Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable:
+ * [DevSNB]: This field must be set to the same value (enabled
+ * or disabled) as Hierarchical Depth Buffer Enable.
+ */
+ DBG("separate stencil without HiZ\n");
+ fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED;
+ }
+ }
+ }
+
+ for (i = 0; i < Elements(fb->Attachment); i++) {
+ struct gl_renderbuffer *rb;
+ struct intel_renderbuffer *irb;
+
+ if (fb->Attachment[i].Type == GL_NONE)
+ continue;
+
+ /* A supported attachment will have a Renderbuffer set either
+ * from being a Renderbuffer or being a texture that got the
+ * intel_wrap_texture() treatment.
+ */
+ rb = fb->Attachment[i].Renderbuffer;
+ if (rb == NULL) {
+ DBG("attachment without renderbuffer\n");
+ fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ continue;
+ }
+
+ if (fb->Attachment[i].Type == GL_TEXTURE) {
+ const struct gl_texture_image *img =
+ _mesa_get_attachment_teximage_const(&fb->Attachment[i]);
+
+ if (img->Border) {
+ DBG("texture with border\n");
+ fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ continue;
+ }
+ }
+
+ irb = intel_renderbuffer(rb);
+ if (irb == NULL) {
+ DBG("software rendering renderbuffer\n");
+ fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ continue;
+ }
+
+ if (!intel->vtbl.render_target_supported(intel, rb)) {
+ DBG("Unsupported HW texture/renderbuffer format attached: %s\n",
+ _mesa_get_format_name(intel_rb_format(irb)));
+ fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ }
+ }
+}
+
+/**
+ * Try to do a glBlitFramebuffer using glCopyTexSubImage2D
+ * We can do this when the dst renderbuffer is actually a texture and
+ * there is no scaling, mirroring or scissoring.
+ *
+ * \return new buffer mask indicating the buffers left to blit using the
+ * normal path.
+ */
+static GLbitfield
+intel_blit_framebuffer_copy_tex_sub_image(struct gl_context *ctx,
+ GLint srcX0, GLint srcY0,
+ GLint srcX1, GLint srcY1,
+ GLint dstX0, GLint dstY0,
+ GLint dstX1, GLint dstY1,
+ GLbitfield mask, GLenum filter)
+{
+ if (mask & GL_COLOR_BUFFER_BIT) {
+ const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
+ const struct gl_framebuffer *readFb = ctx->ReadBuffer;
+ const struct gl_renderbuffer_attachment *drawAtt =
+ &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]];
+ struct intel_renderbuffer *srcRb =
+ intel_renderbuffer(readFb->_ColorReadBuffer);
+
+      /* If the source and destination are the same size with no
+       * mirroring, the rectangles are within the size of the
+       * texture and there is no scissor, then we can use
+       * glCopyTexSubImage2D to implement the blit. This will end
+       * up as a fast hardware blit on some drivers.
+       */
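+      /* Note that equal deltas (srcX0 - srcX1 == dstX0 - dstX1, and
+       * likewise for Y) imply equal extents, i.e. no scaling, while the
+       * srcX1 >= srcX0 and srcY1 >= srcY0 checks reject mirrored blits.
+       */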
+ if (srcRb && drawAtt && drawAtt->Texture &&
+ srcX0 - srcX1 == dstX0 - dstX1 &&
+ srcY0 - srcY1 == dstY0 - dstY1 &&
+ srcX1 >= srcX0 &&
+ srcY1 >= srcY0 &&
+ srcX0 >= 0 && srcX1 <= readFb->Width &&
+ srcY0 >= 0 && srcY1 <= readFb->Height &&
+ dstX0 >= 0 && dstX1 <= drawFb->Width &&
+ dstY0 >= 0 && dstY1 <= drawFb->Height &&
+ !ctx->Scissor.Enabled) {
+ const struct gl_texture_object *texObj = drawAtt->Texture;
+ const GLuint dstLevel = drawAtt->TextureLevel;
+ const GLenum target = texObj->Target;
+
+ struct gl_texture_image *texImage =
+ _mesa_select_tex_image(ctx, texObj, target, dstLevel);
+
+ if (intel_copy_texsubimage(intel_context(ctx),
+ intel_texture_image(texImage),
+ dstX0, dstY0,
+ srcRb,
+ srcX0, srcY0,
+ srcX1 - srcX0, /* width */
+ srcY1 - srcY0))
+ mask &= ~GL_COLOR_BUFFER_BIT;
+ }
+ }
+
+ return mask;
+}
+
+static void
+intel_blit_framebuffer(struct gl_context *ctx,
+ GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
+ GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
+ GLbitfield mask, GLenum filter)
+{
+   /* Try the faster glCopyTexSubImage2D approach first, which uses the BLT. */
+ mask = intel_blit_framebuffer_copy_tex_sub_image(ctx,
+ srcX0, srcY0, srcX1, srcY1,
+ dstX0, dstY0, dstX1, dstY1,
+ mask, filter);
+ if (mask == 0x0)
+ return;
+
+#ifndef I915
+ mask = brw_blorp_framebuffer(intel_context(ctx),
+ srcX0, srcY0, srcX1, srcY1,
+ dstX0, dstY0, dstX1, dstY1,
+ mask, filter);
+ if (mask == 0x0)
+ return;
+#endif
+
+ _mesa_meta_BlitFramebuffer(ctx,
+ srcX0, srcY0, srcX1, srcY1,
+ dstX0, dstY0, dstX1, dstY1,
+ mask, filter);
+}
+
+/**
+ * This is a no-op except on multisample buffers shared with DRI2.
+ */
+void
+intel_renderbuffer_set_needs_downsample(struct intel_renderbuffer *irb)
+{
+ if (irb->mt && irb->mt->singlesample_mt)
+ irb->mt->need_downsample = true;
+}
+
+void
+intel_renderbuffer_set_needs_hiz_resolve(struct intel_renderbuffer *irb)
+{
+ if (irb->mt) {
+ intel_miptree_slice_set_needs_hiz_resolve(irb->mt,
+ irb->mt_level,
+ irb->mt_layer);
+ }
+}
+
+void
+intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer *irb)
+{
+ if (irb->mt) {
+ intel_miptree_slice_set_needs_depth_resolve(irb->mt,
+ irb->mt_level,
+ irb->mt_layer);
+ }
+}
+
+bool
+intel_renderbuffer_resolve_hiz(struct intel_context *intel,
+ struct intel_renderbuffer *irb)
+{
+ if (irb->mt)
+ return intel_miptree_slice_resolve_hiz(intel,
+ irb->mt,
+ irb->mt_level,
+ irb->mt_layer);
+
+ return false;
+}
+
+bool
+intel_renderbuffer_resolve_depth(struct intel_context *intel,
+ struct intel_renderbuffer *irb)
+{
+ if (irb->mt)
+ return intel_miptree_slice_resolve_depth(intel,
+ irb->mt,
+ irb->mt_level,
+ irb->mt_layer);
+
+ return false;
+}
+
+/**
+ * Do one-time context initializations related to GL_EXT_framebuffer_object.
+ * Hook in device driver functions.
+ */
+void
+intel_fbo_init(struct intel_context *intel)
+{
+ intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
+ intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
+ intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer;
+ intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer;
+ intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
+ intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
+ intel->ctx.Driver.RenderTexture = intel_render_texture;
+ intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
+ intel->ctx.Driver.ResizeBuffers = intel_resize_buffers;
+ intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer;
+ intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer;
+
+#if FEATURE_OES_EGL_image
+ intel->ctx.Driver.EGLImageTargetRenderbufferStorage =
+ intel_image_target_renderbuffer_storage;
+#endif
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_mipmap_tree.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_mipmap_tree.c
index 242fed0b6ae..556a82fc7ff 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_mipmap_tree.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_mipmap_tree.c
@@ -1 +1,1711 @@
-../intel/intel_mipmap_tree.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <GL/gl.h>
+#include <GL/internal/dri_interface.h>
+
+#include "intel_batchbuffer.h"
+#include "intel_context.h"
+#include "intel_mipmap_tree.h"
+#include "intel_regions.h"
+#include "intel_resolve_map.h"
+#include "intel_span.h"
+#include "intel_tex_layout.h"
+#include "intel_tex.h"
+#include "intel_blit.h"
+
+#ifndef I915
+#include "brw_blorp.h"
+#endif
+
+#include "main/enums.h"
+#include "main/formats.h"
+#include "main/glformats.h"
+#include "main/texcompress_etc.h"
+#include "main/teximage.h"
+
+#define FILE_DEBUG_FLAG DEBUG_MIPTREE
+
+static GLenum
+target_to_target(GLenum target)
+{
+ switch (target) {
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
+ return GL_TEXTURE_CUBE_MAP_ARB;
+ default:
+ return target;
+ }
+}
+
+/**
+ * @param for_region Indicates that the caller is
+ * intel_miptree_create_for_region(). If true, then do not create
+ * \c stencil_mt.
+ */
+static struct intel_mipmap_tree *
+intel_miptree_create_internal(struct intel_context *intel,
+ GLenum target,
+ gl_format format,
+ GLuint first_level,
+ GLuint last_level,
+ GLuint width0,
+ GLuint height0,
+ GLuint depth0,
+ bool for_region,
+ GLuint num_samples,
+ enum intel_msaa_layout msaa_layout)
+{
+   struct intel_mipmap_tree *mt = calloc(1, sizeof(*mt));
+ int compress_byte = 0;
+
+ DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr(target),
+ _mesa_get_format_name(format),
+ first_level, last_level, mt);
+
+ if (_mesa_is_format_compressed(format))
+ compress_byte = intel_compressed_num_bytes(format);
+
+ mt->target = target_to_target(target);
+ mt->format = format;
+ mt->first_level = first_level;
+ mt->last_level = last_level;
+ mt->width0 = width0;
+ mt->height0 = height0;
+ mt->cpp = compress_byte ? compress_byte : _mesa_get_format_bytes(mt->format);
+ mt->num_samples = num_samples;
+ mt->compressed = compress_byte ? 1 : 0;
+ mt->msaa_layout = msaa_layout;
+ mt->refcount = 1;
+
+ /* array_spacing_lod0 is only used for non-IMS MSAA surfaces. TODO: can we
+ * use it elsewhere?
+ */
+ switch (msaa_layout) {
+ case INTEL_MSAA_LAYOUT_NONE:
+ case INTEL_MSAA_LAYOUT_IMS:
+ mt->array_spacing_lod0 = false;
+ break;
+ case INTEL_MSAA_LAYOUT_UMS:
+ case INTEL_MSAA_LAYOUT_CMS:
+ mt->array_spacing_lod0 = true;
+ break;
+ }
+
+ if (target == GL_TEXTURE_CUBE_MAP) {
+ assert(depth0 == 1);
+ mt->depth0 = 6;
+ } else {
+ mt->depth0 = depth0;
+ }
+
+ if (!for_region &&
+ _mesa_is_depthstencil_format(_mesa_get_format_base_format(format)) &&
+ (intel->must_use_separate_stencil ||
+ (intel->has_separate_stencil &&
+ intel->vtbl.is_hiz_depth_format(intel, format)))) {
+ /* MSAA stencil surfaces always use IMS layout. */
+ enum intel_msaa_layout msaa_layout =
+ num_samples > 1 ? INTEL_MSAA_LAYOUT_IMS : INTEL_MSAA_LAYOUT_NONE;
+ mt->stencil_mt = intel_miptree_create(intel,
+ mt->target,
+ MESA_FORMAT_S8,
+ mt->first_level,
+ mt->last_level,
+ mt->width0,
+ mt->height0,
+ mt->depth0,
+ true,
+ num_samples,
+ msaa_layout);
+ if (!mt->stencil_mt) {
+ intel_miptree_release(&mt);
+ return NULL;
+ }
+
+ /* Fix up the Z miptree format for how we're splitting out separate
+ * stencil. Gen7 expects there to be no stencil bits in its depth buffer.
+ */
+ if (mt->format == MESA_FORMAT_S8_Z24) {
+ mt->format = MESA_FORMAT_X8_Z24;
+ } else if (mt->format == MESA_FORMAT_Z32_FLOAT_X24S8) {
+ mt->format = MESA_FORMAT_Z32_FLOAT;
+ mt->cpp = 4;
+ } else {
+ _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
+ _mesa_get_format_name(mt->format));
+ }
+ }
+
+ intel_get_texture_alignment_unit(intel, mt->format,
+ &mt->align_w, &mt->align_h);
+
+#ifdef I915
+ (void) intel;
+ if (intel->is_945)
+ i945_miptree_layout(mt);
+ else
+ i915_miptree_layout(mt);
+#else
+ brw_miptree_layout(intel, mt);
+#endif
+
+ return mt;
+}
+
+
+struct intel_mipmap_tree *
+intel_miptree_create(struct intel_context *intel,
+ GLenum target,
+ gl_format format,
+ GLuint first_level,
+ GLuint last_level,
+ GLuint width0,
+ GLuint height0,
+ GLuint depth0,
+ bool expect_accelerated_upload,
+ GLuint num_samples,
+ enum intel_msaa_layout msaa_layout)
+{
+ struct intel_mipmap_tree *mt;
+ uint32_t tiling = I915_TILING_NONE;
+ GLenum base_format;
+ bool wraps_etc1 = false;
+ GLuint total_width, total_height;
+
+ if (format == MESA_FORMAT_ETC1_RGB8) {
+ format = MESA_FORMAT_RGBX8888_REV;
+ wraps_etc1 = true;
+ }
+
+ base_format = _mesa_get_format_base_format(format);
+
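+   /* Choose a tiling mode: Y tiling for depth/stencil on Gen4+ and for
+    * MSAA color (see below), X tiling for wide blittable color surfaces,
+    * and linear otherwise.
+    */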
+ if (intel->use_texture_tiling && !_mesa_is_format_compressed(format)) {
+ if (intel->gen >= 4 &&
+ (base_format == GL_DEPTH_COMPONENT ||
+ base_format == GL_DEPTH_STENCIL_EXT))
+ tiling = I915_TILING_Y;
+ else if (msaa_layout != INTEL_MSAA_LAYOUT_NONE) {
+ /* From p82 of the Sandy Bridge PRM, dw3[1] of SURFACE_STATE ("Tiled
+ * Surface"):
+ *
+ * [DevSNB+]: For multi-sample render targets, this field must be
+ * 1. MSRTs can only be tiled.
+ *
+ * Our usual reason for preferring X tiling (fast blits using the
+ * blitting engine) doesn't apply to MSAA, since we'll generally be
+ * downsampling or upsampling when blitting between the MSAA buffer
+ * and another buffer, and the blitting engine doesn't support that.
+ * So use Y tiling, since it makes better use of the cache.
+ */
+ tiling = I915_TILING_Y;
+ } else if (width0 >= 64)
+ tiling = I915_TILING_X;
+ }
+
+ mt = intel_miptree_create_internal(intel, target, format,
+ first_level, last_level, width0,
+ height0, depth0,
+ false, num_samples, msaa_layout);
+   /*
+    * total_width == 0 || total_height == 0 indicates the null texture.
+    */
+ if (!mt || !mt->total_width || !mt->total_height) {
+ intel_miptree_release(&mt);
+ return NULL;
+ }
+
+ total_width = mt->total_width;
+ total_height = mt->total_height;
+
+ if (format == MESA_FORMAT_S8) {
+ /* The stencil buffer is W tiled. However, we request from the kernel a
+ * non-tiled buffer because the GTT is incapable of W fencing. So round
+ * up the width and height to match the size of W tiles (64x64).
+ */
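+      /* For example (illustrative): a 100x100 S8 miptree is padded to
+       * 128x128, since ALIGN(100, 64) == 128.
+       */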
+ tiling = I915_TILING_NONE;
+ total_width = ALIGN(total_width, 64);
+ total_height = ALIGN(total_height, 64);
+ }
+
+ mt->wraps_etc1 = wraps_etc1;
+ mt->region = intel_region_alloc(intel->intelScreen,
+ tiling,
+ mt->cpp,
+ total_width,
+ total_height,
+ expect_accelerated_upload);
+ mt->offset = 0;
+
+ if (!mt->region) {
+ intel_miptree_release(&mt);
+ return NULL;
+ }
+
+ return mt;
+}
+
+
+struct intel_mipmap_tree *
+intel_miptree_create_for_region(struct intel_context *intel,
+ GLenum target,
+ gl_format format,
+ struct intel_region *region)
+{
+ struct intel_mipmap_tree *mt;
+
+ mt = intel_miptree_create_internal(intel, target, format,
+ 0, 0,
+ region->width, region->height, 1,
+ true, 0 /* num_samples */,
+ INTEL_MSAA_LAYOUT_NONE);
+ if (!mt)
+ return mt;
+
+ intel_region_reference(&mt->region, region);
+
+ return mt;
+}
+
+/**
+ * Determine which MSAA layout should be used by the MSAA surface being
+ * created, based on the chip generation and the surface type.
+ */
+static enum intel_msaa_layout
+compute_msaa_layout(struct intel_context *intel, gl_format format)
+{
+ /* Prior to Gen7, all MSAA surfaces used IMS layout. */
+ if (intel->gen < 7)
+ return INTEL_MSAA_LAYOUT_IMS;
+
+ /* In Gen7, IMS layout is only used for depth and stencil buffers. */
+ switch (_mesa_get_format_base_format(format)) {
+ case GL_DEPTH_COMPONENT:
+ case GL_STENCIL_INDEX:
+ case GL_DEPTH_STENCIL:
+ return INTEL_MSAA_LAYOUT_IMS;
+ default:
+ /* From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
+ *
+ * This field must be set to 0 for all SINT MSRTs when all RT channels
+ * are not written
+ *
+ * In practice this means that we have to disable MCS for all signed
+ * integer MSAA buffers. The alternative, to disable MCS only when one
+ * of the render target channels is disabled, is impractical because it
+ * would require converting between CMS and UMS MSAA layouts on the fly,
+ * which is expensive.
+ */
+ if (_mesa_get_format_datatype(format) == GL_INT) {
+ /* TODO: is this workaround needed for future chipsets? */
+ assert(intel->gen == 7);
+ return INTEL_MSAA_LAYOUT_UMS;
+ } else {
+ return INTEL_MSAA_LAYOUT_CMS;
+ }
+ }
+}
+
+/**
+ * For a singlesample DRI2 buffer, this simply wraps the given region with a miptree.
+ *
+ * For a multisample DRI2 buffer, this wraps the given region with
+ * a singlesample miptree, then creates a multisample miptree into which the
+ * singlesample miptree is embedded as a child.
+ */
+struct intel_mipmap_tree*
+intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
+ unsigned dri_attachment,
+ gl_format format,
+ uint32_t num_samples,
+ struct intel_region *region)
+{
+ struct intel_mipmap_tree *singlesample_mt = NULL;
+ struct intel_mipmap_tree *multisample_mt = NULL;
+ GLenum base_format = _mesa_get_format_base_format(format);
+
+ /* Only the front and back buffers, which are color buffers, are shared
+ * through DRI2.
+ */
+ assert(dri_attachment == __DRI_BUFFER_BACK_LEFT ||
+ dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
+ dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT);
+ assert(base_format == GL_RGB || base_format == GL_RGBA);
+
+ singlesample_mt = intel_miptree_create_for_region(intel, GL_TEXTURE_2D,
+ format, region);
+ if (!singlesample_mt)
+ return NULL;
+
+ if (num_samples == 0)
+ return singlesample_mt;
+
+ multisample_mt = intel_miptree_create_for_renderbuffer(intel,
+ format,
+ region->width,
+ region->height,
+ num_samples);
+ if (!multisample_mt) {
+ intel_miptree_release(&singlesample_mt);
+ return NULL;
+ }
+
+ multisample_mt->singlesample_mt = singlesample_mt;
+ multisample_mt->need_downsample = false;
+
+ if (intel->is_front_buffer_rendering &&
+ (dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
+ dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT)) {
+ intel_miptree_upsample(intel, multisample_mt);
+ }
+
+ return multisample_mt;
+}
+
+struct intel_mipmap_tree*
+intel_miptree_create_for_renderbuffer(struct intel_context *intel,
+ gl_format format,
+ uint32_t width,
+ uint32_t height,
+ uint32_t num_samples)
+{
+ struct intel_mipmap_tree *mt;
+ uint32_t depth = 1;
+ enum intel_msaa_layout msaa_layout = INTEL_MSAA_LAYOUT_NONE;
+ const uint32_t singlesample_width = width;
+ const uint32_t singlesample_height = height;
+ bool ok;
+
+ if (num_samples > 1) {
+ /* Adjust width/height/depth for MSAA */
+ msaa_layout = compute_msaa_layout(intel, format);
+ if (msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
+ /* In the Sandy Bridge PRM, volume 4, part 1, page 31, it says:
+ *
+ * "Any of the other messages (sample*, LOD, load4) used with a
+ * (4x) multisampled surface will in-effect sample a surface with
+ * double the height and width as that indicated in the surface
+ * state. Each pixel position on the original-sized surface is
+ * replaced with a 2x2 of samples with the following arrangement:
+ *
+ * sample 0 sample 2
+ * sample 1 sample 3"
+ *
+ * Thus, when sampling from a multisampled texture, it behaves as
+ * though the layout in memory for (x,y,sample) is:
+ *
+ * (0,0,0) (0,0,2) (1,0,0) (1,0,2)
+ * (0,0,1) (0,0,3) (1,0,1) (1,0,3)
+ *
+ * (0,1,0) (0,1,2) (1,1,0) (1,1,2)
+ * (0,1,1) (0,1,3) (1,1,1) (1,1,3)
+ *
+ * However, the actual layout of multisampled data in memory is:
+ *
+ * (0,0,0) (1,0,0) (0,0,1) (1,0,1)
+ * (0,1,0) (1,1,0) (0,1,1) (1,1,1)
+ *
+ * (0,0,2) (1,0,2) (0,0,3) (1,0,3)
+ * (0,1,2) (1,1,2) (0,1,3) (1,1,3)
+ *
+ * This pattern repeats for each 2x2 pixel block.
+ *
+ * As a result, when calculating the size of our 4-sample buffer for
+ * an odd width or height, we have to align before scaling up because
+ * sample 3 is in that bottom right 2x2 block.
+ */
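+      /* For example (illustrative): a 5x3 4x IMS buffer is allocated as
+       * ALIGN(5, 2) * 2 == 12 wide by ALIGN(3, 2) * 2 == 8 high.
+       */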
+ switch (num_samples) {
+ case 4:
+ width = ALIGN(width, 2) * 2;
+ height = ALIGN(height, 2) * 2;
+ break;
+ case 8:
+ width = ALIGN(width, 2) * 4;
+ height = ALIGN(height, 2) * 2;
+ break;
+ default:
+ /* num_samples should already have been quantized to 0, 1, 4, or
+ * 8.
+ */
+ assert(false);
+ }
+ } else {
+ /* Non-interleaved */
+ depth = num_samples;
+ }
+ }
+
+ mt = intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
+ width, height, depth, true, num_samples,
+ msaa_layout);
+ if (!mt)
+ goto fail;
+
+ if (intel->vtbl.is_hiz_depth_format(intel, format)) {
+ ok = intel_miptree_alloc_hiz(intel, mt, num_samples);
+ if (!ok)
+ goto fail;
+ }
+
+ if (mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
+ ok = intel_miptree_alloc_mcs(intel, mt, num_samples);
+ if (!ok)
+ goto fail;
+ }
+
+ mt->singlesample_width0 = singlesample_width;
+ mt->singlesample_height0 = singlesample_height;
+
+ return mt;
+
+fail:
+ intel_miptree_release(&mt);
+ return NULL;
+}
+
+void
+intel_miptree_reference(struct intel_mipmap_tree **dst,
+ struct intel_mipmap_tree *src)
+{
+ if (*dst == src)
+ return;
+
+ intel_miptree_release(dst);
+
+ if (src) {
+ src->refcount++;
+ DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
+ }
+
+ *dst = src;
+}
+
+
+void
+intel_miptree_release(struct intel_mipmap_tree **mt)
+{
+ if (!*mt)
+ return;
+
+ DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
+ if (--(*mt)->refcount <= 0) {
+ GLuint i;
+
+ DBG("%s deleting %p\n", __FUNCTION__, *mt);
+
+ intel_region_release(&((*mt)->region));
+ intel_miptree_release(&(*mt)->stencil_mt);
+ intel_miptree_release(&(*mt)->hiz_mt);
+ intel_miptree_release(&(*mt)->mcs_mt);
+ intel_miptree_release(&(*mt)->singlesample_mt);
+ intel_resolve_map_clear(&(*mt)->hiz_map);
+
+ for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
+ free((*mt)->level[i].slice);
+ }
+
+ free(*mt);
+ }
+ *mt = NULL;
+}
+
+void
+intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
+ int *width, int *height, int *depth)
+{
+ switch (image->TexObject->Target) {
+ case GL_TEXTURE_1D_ARRAY:
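+      /* 1D array textures store their layers in Height; e.g.
+       * (illustrative) a 64-wide, 8-layer array yields width 64,
+       * height 1, depth 8.
+       */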
+ *width = image->Width;
+ *height = 1;
+ *depth = image->Height;
+ break;
+ default:
+ *width = image->Width;
+ *height = image->Height;
+ *depth = image->Depth;
+ break;
+ }
+}
+
+/**
+ * Can the image be pulled into a unified mipmap tree? This mirrors
+ * the completeness test in a lot of ways.
+ *
+ * Not sure whether I want to pass gl_texture_image here.
+ */
+bool
+intel_miptree_match_image(struct intel_mipmap_tree *mt,
+ struct gl_texture_image *image)
+{
+ struct intel_texture_image *intelImage = intel_texture_image(image);
+ GLuint level = intelImage->base.Base.Level;
+ int width, height, depth;
+
+ if (target_to_target(image->TexObject->Target) != mt->target)
+ return false;
+
+ if (image->TexFormat != mt->format &&
+ !(image->TexFormat == MESA_FORMAT_S8_Z24 &&
+ mt->format == MESA_FORMAT_X8_Z24 &&
+ mt->stencil_mt)) {
+ return false;
+ }
+
+ intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);
+
+ if (mt->target == GL_TEXTURE_CUBE_MAP)
+ depth = 6;
+
+ /* Test image dimensions against the base level image adjusted for
+ * minification. This will also catch images not present in the
+ * tree, changed targets, etc.
+ */
+ if (width != mt->level[level].width ||
+ height != mt->level[level].height ||
+ depth != mt->level[level].depth)
+ return false;
+
+ return true;
+}
+
+
+void
+intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
+ GLuint level,
+ GLuint x, GLuint y,
+ GLuint w, GLuint h, GLuint d)
+{
+ mt->level[level].width = w;
+ mt->level[level].height = h;
+ mt->level[level].depth = d;
+ mt->level[level].level_x = x;
+ mt->level[level].level_y = y;
+
+ DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
+ level, w, h, d, x, y);
+
+ assert(mt->level[level].slice == NULL);
+
+ mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
+ mt->level[level].slice[0].x_offset = mt->level[level].level_x;
+ mt->level[level].slice[0].y_offset = mt->level[level].level_y;
+}
+
+
+void
+intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
+ GLuint level, GLuint img,
+ GLuint x, GLuint y)
+{
+ if (img == 0 && level == 0)
+ assert(x == 0 && y == 0);
+
+ assert(img < mt->level[level].depth);
+
+ mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
+ mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;
+
+ DBG("%s level %d img %d pos %d,%d\n",
+ __FUNCTION__, level, img,
+ mt->level[level].slice[img].x_offset,
+ mt->level[level].slice[img].y_offset);
+}
+
+
+/**
+ * For cube map textures, the caller may either use the \c face parameter
+ * directly, or interpret the cube face as a depth layer and use the
+ * \c layer parameter instead.
+ */
+void
+intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
+ GLuint level, GLuint face, GLuint layer,
+ GLuint *x, GLuint *y)
+{
+ int slice;
+
+ if (face > 0) {
+ assert(mt->target == GL_TEXTURE_CUBE_MAP);
+ assert(face < 6);
+ assert(layer == 0);
+ slice = face;
+ } else {
+ /* This branch may be taken even if the texture target is a cube map. In
+ * that case, the caller chose to interpret each cube face as a layer.
+ */
+ assert(face == 0);
+ slice = layer;
+ }
+
+ *x = mt->level[level].slice[slice].x_offset;
+ *y = mt->level[level].slice[slice].y_offset;
+}
+
+static void
+intel_miptree_copy_slice(struct intel_context *intel,
+ struct intel_mipmap_tree *dst_mt,
+ struct intel_mipmap_tree *src_mt,
+ int level,
+ int face,
+ int depth)
+{
+ gl_format format = src_mt->format;
+ uint32_t width = src_mt->level[level].width;
+ uint32_t height = src_mt->level[level].height;
+
+ assert(depth < src_mt->level[level].depth);
+
+ if (dst_mt->compressed) {
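+      /* For compressed formats the blit operates on rows of blocks:
+       * convert height from pixel rows to block rows and pad width out
+       * to a whole number of blocks.
+       */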
+ height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
+ width = ALIGN(width, dst_mt->align_w);
+ }
+
+ uint32_t dst_x, dst_y, src_x, src_y;
+ intel_miptree_get_image_offset(dst_mt, level, face, depth,
+ &dst_x, &dst_y);
+ intel_miptree_get_image_offset(src_mt, level, face, depth,
+ &src_x, &src_y);
+
+ DBG("validate blit mt %p %d,%d/%d -> mt %p %d,%d/%d (%dx%d)\n",
+ src_mt, src_x, src_y, src_mt->region->pitch * src_mt->region->cpp,
+ dst_mt, dst_x, dst_y, dst_mt->region->pitch * dst_mt->region->cpp,
+ width, height);
+
+ if (!intelEmitCopyBlit(intel,
+ dst_mt->region->cpp,
+ src_mt->region->pitch, src_mt->region->bo,
+ 0, src_mt->region->tiling,
+ dst_mt->region->pitch, dst_mt->region->bo,
+ 0, dst_mt->region->tiling,
+ src_x, src_y,
+ dst_x, dst_y,
+ width, height,
+ GL_COPY)) {
+
+ fallback_debug("miptree validate blit for %s failed\n",
+ _mesa_get_format_name(format));
+ void *dst = intel_region_map(intel, dst_mt->region, GL_MAP_WRITE_BIT);
+ void *src = intel_region_map(intel, src_mt->region, GL_MAP_READ_BIT);
+
+ _mesa_copy_rect(dst,
+ dst_mt->cpp,
+ dst_mt->region->pitch,
+ dst_x, dst_y,
+ width, height,
+ src, src_mt->region->pitch,
+ src_x, src_y);
+
+ intel_region_unmap(intel, dst_mt->region);
+ intel_region_unmap(intel, src_mt->region);
+ }
+
+ if (src_mt->stencil_mt) {
+ intel_miptree_copy_slice(intel,
+ dst_mt->stencil_mt, src_mt->stencil_mt,
+ level, face, depth);
+ }
+}
+
+/**
+ * Copies the image's current data to the given miptree, and associates that
+ * miptree with the image.
+ */
+void
+intel_miptree_copy_teximage(struct intel_context *intel,
+ struct intel_texture_image *intelImage,
+ struct intel_mipmap_tree *dst_mt)
+{
+ struct intel_mipmap_tree *src_mt = intelImage->mt;
+ int level = intelImage->base.Base.Level;
+ int face = intelImage->base.Base.Face;
+ GLuint depth = intelImage->base.Base.Depth;
+
+ for (int slice = 0; slice < depth; slice++) {
+ intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
+ }
+
+ intel_miptree_reference(&intelImage->mt, dst_mt);
+}
+
+bool
+intel_miptree_alloc_mcs(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ GLuint num_samples)
+{
+ assert(mt->mcs_mt == NULL);
+ assert(intel->gen >= 7); /* MCS only used on Gen7+ */
+
+ /* Choose the correct format for the MCS buffer. All that really matters
+ * is that we allocate the right buffer size, since we'll always be
+ * accessing this miptree using MCS-specific hardware mechanisms, which
+ * infer the correct format based on num_samples.
+ */
+ gl_format format;
+ switch (num_samples) {
+ case 4:
+ /* 8 bits/pixel are required for MCS data when using 4x MSAA (2 bits for
+ * each sample).
+ */
+ format = MESA_FORMAT_R8;
+ break;
+ case 8:
+ /* 32 bits/pixel are required for MCS data when using 8x MSAA (3 bits
+ * for each sample, plus 8 padding bits).
+ */
+ format = MESA_FORMAT_R_UINT32;
+ break;
+ default:
+ assert(!"Unrecognized sample count in intel_miptree_alloc_mcs");
+      return false;
+   }
+
+ /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
+ *
+ * "The MCS surface must be stored as Tile Y."
+ *
+    * We pass INTEL_MSAA_LAYOUT_CMS as the msaa_layout to force
+    * intel_miptree_create() to use Y tiling; the layout is otherwise
+    * ignored for the MCS miptree.
+ */
+ mt->mcs_mt = intel_miptree_create(intel,
+ mt->target,
+ format,
+ mt->first_level,
+ mt->last_level,
+ mt->width0,
+ mt->height0,
+ mt->depth0,
+ true,
+ 0 /* num_samples */,
+ INTEL_MSAA_LAYOUT_CMS);
+
+ /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
+ *
+ * When MCS buffer is enabled and bound to MSRT, it is required that it
+ * is cleared prior to any rendering.
+ *
+ * Since we don't use the MCS buffer for any purpose other than rendering,
+ * it makes sense to just clear it immediately upon allocation.
+ *
+ * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
+ */
+ void *data = intel_region_map(intel, mt->mcs_mt->region, 0);
+ memset(data, 0xff, mt->mcs_mt->region->bo->size);
+ intel_region_unmap(intel, mt->mcs_mt->region);
+
+ return mt->mcs_mt;
+}
+
+bool
+intel_miptree_alloc_hiz(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ GLuint num_samples)
+{
+ assert(mt->hiz_mt == NULL);
+ /* MSAA HiZ surfaces always use IMS layout. */
+ mt->hiz_mt = intel_miptree_create(intel,
+ mt->target,
+ MESA_FORMAT_X8_Z24,
+ mt->first_level,
+ mt->last_level,
+ mt->width0,
+ mt->height0,
+ mt->depth0,
+ true,
+ num_samples,
+ INTEL_MSAA_LAYOUT_IMS);
+
+ if (!mt->hiz_mt)
+ return false;
+
+ /* Mark that all slices need a HiZ resolve. */
+ struct intel_resolve_map *head = &mt->hiz_map;
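+   /* hiz_map is a list headed by an embedded sentinel node; append one
+    * entry per (level, layer) pair, each marked as needing a HiZ resolve.
+    */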
+ for (int level = mt->first_level; level <= mt->last_level; ++level) {
+ for (int layer = 0; layer < mt->level[level].depth; ++layer) {
+ head->next = malloc(sizeof(*head->next));
+ head->next->prev = head;
+ head->next->next = NULL;
+ head = head->next;
+
+ head->level = level;
+ head->layer = layer;
+ head->need = GEN6_HIZ_OP_HIZ_RESOLVE;
+ }
+ }
+
+ return true;
+}
+
+void
+intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
+ uint32_t level,
+ uint32_t layer)
+{
+ intel_miptree_check_level_layer(mt, level, layer);
+
+ if (!mt->hiz_mt)
+ return;
+
+ intel_resolve_map_set(&mt->hiz_map,
+ level, layer, GEN6_HIZ_OP_HIZ_RESOLVE);
+}
+
+
+void
+intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
+ uint32_t level,
+ uint32_t layer)
+{
+ intel_miptree_check_level_layer(mt, level, layer);
+
+ if (!mt->hiz_mt)
+ return;
+
+ intel_resolve_map_set(&mt->hiz_map,
+ level, layer, GEN6_HIZ_OP_DEPTH_RESOLVE);
+}
+
+static bool
+intel_miptree_slice_resolve(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ uint32_t level,
+ uint32_t layer,
+ enum gen6_hiz_op need)
+{
+ intel_miptree_check_level_layer(mt, level, layer);
+
+ struct intel_resolve_map *item =
+ intel_resolve_map_get(&mt->hiz_map, level, layer);
+
+ if (!item || item->need != need)
+ return false;
+
+ intel_hiz_exec(intel, mt, level, layer, need);
+ intel_resolve_map_remove(item);
+ return true;
+}
+
+bool
+intel_miptree_slice_resolve_hiz(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ uint32_t level,
+ uint32_t layer)
+{
+ return intel_miptree_slice_resolve(intel, mt, level, layer,
+ GEN6_HIZ_OP_HIZ_RESOLVE);
+}
+
+bool
+intel_miptree_slice_resolve_depth(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ uint32_t level,
+ uint32_t layer)
+{
+ return intel_miptree_slice_resolve(intel, mt, level, layer,
+ GEN6_HIZ_OP_DEPTH_RESOLVE);
+}
+
+static bool
+intel_miptree_all_slices_resolve(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ enum gen6_hiz_op need)
+{
+ bool did_resolve = false;
+ struct intel_resolve_map *i, *next;
+
+ for (i = mt->hiz_map.next; i; i = next) {
+ next = i->next;
+ if (i->need != need)
+ continue;
+
+ intel_hiz_exec(intel, mt, i->level, i->layer, need);
+ intel_resolve_map_remove(i);
+ did_resolve = true;
+ }
+
+ return did_resolve;
+}
+
+bool
+intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
+ struct intel_mipmap_tree *mt)
+{
+ return intel_miptree_all_slices_resolve(intel, mt,
+ GEN6_HIZ_OP_HIZ_RESOLVE);
+}
+
+bool
+intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
+ struct intel_mipmap_tree *mt)
+{
+ return intel_miptree_all_slices_resolve(intel, mt,
+ GEN6_HIZ_OP_DEPTH_RESOLVE);
+}
+
+static void
+intel_miptree_updownsample(struct intel_context *intel,
+ struct intel_mipmap_tree *src,
+ struct intel_mipmap_tree *dst,
+ unsigned width,
+ unsigned height)
+{
+#ifndef I915
+ int src_x0 = 0;
+ int src_y0 = 0;
+ int dst_x0 = 0;
+ int dst_y0 = 0;
+
+ intel_miptree_slice_resolve_depth(intel, src, 0, 0);
+ intel_miptree_slice_resolve_depth(intel, dst, 0, 0);
+
+ brw_blorp_blit_miptrees(intel,
+ src, 0 /* level */, 0 /* layer */,
+ dst, 0 /* level */, 0 /* layer */,
+ src_x0, src_y0,
+ dst_x0, dst_y0,
+ width, height,
+ false, false /*mirror x, y*/);
+
+ if (src->stencil_mt) {
+ brw_blorp_blit_miptrees(intel,
+ src->stencil_mt, 0 /* level */, 0 /* layer */,
+ dst->stencil_mt, 0 /* level */, 0 /* layer */,
+ src_x0, src_y0,
+ dst_x0, dst_y0,
+ width, height,
+ false, false /*mirror x, y*/);
+ }
+#endif /* I915 */
+}
+
+static void
+assert_is_flat(struct intel_mipmap_tree *mt)
+{
+ assert(mt->target == GL_TEXTURE_2D);
+ assert(mt->first_level == 0);
+ assert(mt->last_level == 0);
+}
+
+/**
+ * \brief Downsample from mt to mt->singlesample_mt.
+ *
+ * If the miptree needs no downsample, then skip.
+ */
+void
+intel_miptree_downsample(struct intel_context *intel,
+ struct intel_mipmap_tree *mt)
+{
+ /* Only flat, renderbuffer-like miptrees are supported. */
+ assert_is_flat(mt);
+
+ if (!mt->need_downsample)
+ return;
+ intel_miptree_updownsample(intel,
+ mt, mt->singlesample_mt,
+ mt->singlesample_mt->width0,
+ mt->singlesample_mt->height0);
+ mt->need_downsample = false;
+
+ /* Strictly speaking, after a downsample on a depth miptree, a hiz
+ * resolve is needed on the singlesample miptree. However, since the
+ * singlesample miptree is never rendered to, the hiz resolve will never
+ * occur. Therefore we do not mark the needed hiz resolve after
+ * downsampling.
+ */
+}
+
+/**
+ * \brief Upsample from mt->singlesample_mt to mt.
+ *
+ * The upsample is done unconditionally.
+ */
+void
+intel_miptree_upsample(struct intel_context *intel,
+ struct intel_mipmap_tree *mt)
+{
+ /* Only flat, renderbuffer-like miptrees are supported. */
+ assert_is_flat(mt);
+ assert(!mt->need_downsample);
+
+ intel_miptree_updownsample(intel,
+ mt->singlesample_mt, mt,
+ mt->singlesample_mt->width0,
+ mt->singlesample_mt->height0);
+ intel_miptree_slice_set_needs_hiz_resolve(mt, 0, 0);
+}
+
+static void
+intel_miptree_map_gtt(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ struct intel_miptree_map *map,
+ unsigned int level, unsigned int slice)
+{
+ unsigned int bw, bh;
+ void *base;
+ unsigned int image_x, image_y;
+ int x = map->x;
+ int y = map->y;
+
+ /* For compressed formats, the stride is the number of bytes per
+ * row of blocks. intel_miptree_get_image_offset() already does
+ * the divide.
+ */
+ _mesa_get_format_block_size(mt->format, &bw, &bh);
+ assert(y % bh == 0);
+ y /= bh;
+
+ base = intel_region_map(intel, mt->region, map->mode);
+
+ if (base == NULL)
+ map->ptr = NULL;
+ else {
+ /* Note that in the case of cube maps, the caller must have passed the
+ * slice number referencing the face.
+ */
+ intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
+ x += image_x;
+ y += image_y;
+
+ map->stride = mt->region->pitch * mt->cpp;
+ map->ptr = base + y * map->stride + x * mt->cpp;
+ }
+
+ DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
+ map->x, map->y, map->w, map->h,
+ mt, _mesa_get_format_name(mt->format),
+ x, y, map->ptr, map->stride);
+}
+
+static void
+intel_miptree_unmap_gtt(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ struct intel_miptree_map *map,
+ unsigned int level,
+ unsigned int slice)
+{
+ intel_region_unmap(intel, mt->region);
+}
+
+static void
+intel_miptree_map_blit(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ struct intel_miptree_map *map,
+ unsigned int level, unsigned int slice)
+{
+ unsigned int image_x, image_y;
+ int x = map->x;
+ int y = map->y;
+ int ret;
+
+ /* The blitter requires the pitch to be aligned to 4. */
+ map->stride = ALIGN(map->w * mt->region->cpp, 4);
+
+ map->bo = drm_intel_bo_alloc(intel->bufmgr, "intel_miptree_map_blit() temp",
+ map->stride * map->h, 4096);
+ if (!map->bo) {
+ fprintf(stderr, "Failed to allocate blit temporary\n");
+ goto fail;
+ }
+
+ intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
+ x += image_x;
+ y += image_y;
+
+ if (!intelEmitCopyBlit(intel,
+ mt->region->cpp,
+ mt->region->pitch, mt->region->bo,
+ 0, mt->region->tiling,
+ map->stride / mt->region->cpp, map->bo,
+ 0, I915_TILING_NONE,
+ x, y,
+ 0, 0,
+ map->w, map->h,
+ GL_COPY)) {
+ fprintf(stderr, "Failed to blit\n");
+ goto fail;
+ }
+
+ intel_batchbuffer_flush(intel);
+ ret = drm_intel_bo_map(map->bo, (map->mode & GL_MAP_WRITE_BIT) != 0);
+ if (ret) {
+ fprintf(stderr, "Failed to map blit temporary\n");
+ goto fail;
+ }
+
+ map->ptr = map->bo->virtual;
+
+ DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
+ map->x, map->y, map->w, map->h,
+ mt, _mesa_get_format_name(mt->format),
+ x, y, map->ptr, map->stride);
+
+ return;
+
+fail:
+ drm_intel_bo_unreference(map->bo);
+ map->ptr = NULL;
+ map->stride = 0;
+}
+
+static void
+intel_miptree_unmap_blit(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ struct intel_miptree_map *map,
+ unsigned int level,
+ unsigned int slice)
+{
+ assert(!(map->mode & GL_MAP_WRITE_BIT));
+
+ drm_intel_bo_unmap(map->bo);
+ drm_intel_bo_unreference(map->bo);
+}
+
+static void
+intel_miptree_map_s8(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ struct intel_miptree_map *map,
+ unsigned int level, unsigned int slice)
+{
+ map->stride = map->w;
+ map->buffer = map->ptr = malloc(map->stride * map->h);
+ if (!map->buffer)
+ return;
+
+ /* One of either READ_BIT or WRITE_BIT or both is set. READ_BIT implies no
+ * INVALIDATE_RANGE_BIT. WRITE_BIT needs the original values read in unless
+ * invalidate is set, since we'll be writing the whole rectangle from our
+ * temporary buffer back out.
+ */
+ if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
+ uint8_t *untiled_s8_map = map->ptr;
+ uint8_t *tiled_s8_map = intel_region_map(intel, mt->region,
+ GL_MAP_READ_BIT);
+ unsigned int image_x, image_y;
+
+ intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
+
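+      /* Untile one pixel at a time: intel_offset_S8() maps an (x, y)
+       * coordinate to its byte offset within the W-tiled stencil buffer,
+       * accounting for address swizzling when the hardware uses it.
+       */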
+ for (uint32_t y = 0; y < map->h; y++) {
+ for (uint32_t x = 0; x < map->w; x++) {
+ ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
+ x + image_x + map->x,
+ y + image_y + map->y,
+ intel->has_swizzling);
+ untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
+ }
+ }
+
+ intel_region_unmap(intel, mt->region);
+
+ DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
+ map->x, map->y, map->w, map->h,
+ mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
+ } else {
+ DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
+ map->x, map->y, map->w, map->h,
+ mt, map->ptr, map->stride);
+ }
+}
+
+static void
+intel_miptree_unmap_s8(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ struct intel_miptree_map *map,
+ unsigned int level,
+ unsigned int slice)
+{
+ if (map->mode & GL_MAP_WRITE_BIT) {
+ unsigned int image_x, image_y;
+ uint8_t *untiled_s8_map = map->ptr;
+ uint8_t *tiled_s8_map = intel_region_map(intel, mt->region, map->mode);
+
+ intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
+
+ for (uint32_t y = 0; y < map->h; y++) {
+ for (uint32_t x = 0; x < map->w; x++) {
+ ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
+                                            x + image_x + map->x,
+                                            y + image_y + map->y,
+ intel->has_swizzling);
+ tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
+ }
+ }
+
+ intel_region_unmap(intel, mt->region);
+ }
+
+ free(map->buffer);
+}
+
+static void
+intel_miptree_map_etc1(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ struct intel_miptree_map *map,
+ unsigned int level,
+ unsigned int slice)
+{
+ /* For justification of these invariants,
+ * see intel_mipmap_tree:wraps_etc1.
+ */
+ assert(mt->wraps_etc1);
+ assert(mt->format == MESA_FORMAT_RGBX8888_REV);
+
+ /* From the GL_OES_compressed_ETC1_RGB8_texture spec:
+ * INVALID_OPERATION is generated by CompressedTexSubImage2D,
+ * TexSubImage2D, or CopyTexSubImage2D if the texture image <level>
+ * bound to <target> has internal format ETC1_RGB8_OES.
+ *
+ * This implies that intel_miptree_map_etc1() can only be called from
+ * glCompressedTexImage2D, and hence the assertions below hold.
+ */
+ assert(map->mode & GL_MAP_WRITE_BIT);
+ assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);
+ assert(map->x == 0);
+ assert(map->y == 0);
+
+ /* Each ETC1 block contains 4x4 pixels in 8 bytes. */
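+   /* A row of blocks spanning map->w pixels therefore takes
+    * (map->w / 4) * 8 == 2 * map->w bytes, assuming block-aligned w.
+    */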
+ map->stride = 2 * map->w;
+ map->buffer = map->ptr = malloc(map->stride * map->h);
+}
+
+static void
+intel_miptree_unmap_etc1(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ struct intel_miptree_map *map,
+ unsigned int level,
+ unsigned int slice)
+{
+ uint32_t image_x;
+ uint32_t image_y;
+ intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
+
+ uint8_t *xbgr = intel_region_map(intel, mt->region, map->mode)
+ + image_y * mt->region->pitch * mt->region->cpp
+ + image_x * mt->region->cpp;
+
+ _mesa_etc1_unpack_rgba8888(xbgr, mt->region->pitch * mt->region->cpp,
+ map->ptr, map->stride,
+ map->w, map->h);
+
+ intel_region_unmap(intel, mt->region);
+ free(map->buffer);
+}
+
+/**
+ * Mapping function for packed depth/stencil miptrees backed by real separate
+ * miptrees for depth and stencil.
+ *
+ * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
+ * separate from the depth buffer. Yet at the GL API level, we have to expose
+ * packed depth/stencil textures and FBO attachments, and Mesa core expects to
+ * be able to map that memory for texture storage and glReadPixels-type
+ * operations. We give Mesa core that access by mallocing a temporary and
+ * copying the data between the actual backing store and the temporary.
+ */
+static void
+intel_miptree_map_depthstencil(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ struct intel_miptree_map *map,
+ unsigned int level, unsigned int slice)
+{
+ struct intel_mipmap_tree *z_mt = mt;
+ struct intel_mipmap_tree *s_mt = mt->stencil_mt;
+ bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
+ int packed_bpp = map_z32f_x24s8 ? 8 : 4;
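+   /* With separate stencil, a Z32_FLOAT depth miptree is exposed to GL
+    * as packed Z32_FLOAT_X24S8, which maps as 8 bytes per pixel (a float
+    * depth dword plus a stencil dword); S8_Z24 packs into one dword.
+    */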
+
+ map->stride = map->w * packed_bpp;
+ map->buffer = map->ptr = malloc(map->stride * map->h);
+ if (!map->buffer)
+ return;
+
+ /* One of either READ_BIT or WRITE_BIT or both is set. READ_BIT implies no
+ * INVALIDATE_RANGE_BIT. WRITE_BIT needs the original values read in unless
+ * invalidate is set, since we'll be writing the whole rectangle from our
+ * temporary buffer back out.
+ */
+ if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
+ uint32_t *packed_map = map->ptr;
+ uint8_t *s_map = intel_region_map(intel, s_mt->region, GL_MAP_READ_BIT);
+ uint32_t *z_map = intel_region_map(intel, z_mt->region, GL_MAP_READ_BIT);
+ unsigned int s_image_x, s_image_y;
+ unsigned int z_image_x, z_image_y;
+
+ intel_miptree_get_image_offset(s_mt, level, 0, slice,
+ &s_image_x, &s_image_y);
+ intel_miptree_get_image_offset(z_mt, level, 0, slice,
+ &z_image_x, &z_image_y);
+
+ for (uint32_t y = 0; y < map->h; y++) {
+ for (uint32_t x = 0; x < map->w; x++) {
+ int map_x = map->x + x, map_y = map->y + y;
+ ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
+ map_x + s_image_x,
+ map_y + s_image_y,
+ intel->has_swizzling);
+ ptrdiff_t z_offset = ((map_y + z_image_y) * z_mt->region->pitch +
+ (map_x + z_image_x));
+ uint8_t s = s_map[s_offset];
+ uint32_t z = z_map[z_offset];
+
+ if (map_z32f_x24s8) {
+ packed_map[(y * map->w + x) * 2 + 0] = z;
+ packed_map[(y * map->w + x) * 2 + 1] = s;
+ } else {
+ packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
+ }
+ }
+ }
+
+ intel_region_unmap(intel, s_mt->region);
+ intel_region_unmap(intel, z_mt->region);
+
+ DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
+ __FUNCTION__,
+ map->x, map->y, map->w, map->h,
+ z_mt, map->x + z_image_x, map->y + z_image_y,
+ s_mt, map->x + s_image_x, map->y + s_image_y,
+ map->ptr, map->stride);
+ } else {
+ DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
+ map->x, map->y, map->w, map->h,
+ mt, map->ptr, map->stride);
+ }
+}
+
+static void
+intel_miptree_unmap_depthstencil(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ struct intel_miptree_map *map,
+ unsigned int level,
+ unsigned int slice)
+{
+ struct intel_mipmap_tree *z_mt = mt;
+ struct intel_mipmap_tree *s_mt = mt->stencil_mt;
+ bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
+
+ if (map->mode & GL_MAP_WRITE_BIT) {
+ uint32_t *packed_map = map->ptr;
+ uint8_t *s_map = intel_region_map(intel, s_mt->region, map->mode);
+ uint32_t *z_map = intel_region_map(intel, z_mt->region, map->mode);
+ unsigned int s_image_x, s_image_y;
+ unsigned int z_image_x, z_image_y;
+
+ intel_miptree_get_image_offset(s_mt, level, 0, slice,
+ &s_image_x, &s_image_y);
+ intel_miptree_get_image_offset(z_mt, level, 0, slice,
+ &z_image_x, &z_image_y);
+
+ for (uint32_t y = 0; y < map->h; y++) {
+ for (uint32_t x = 0; x < map->w; x++) {
+ ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
+ x + s_image_x + map->x,
+ y + s_image_y + map->y,
+ intel->has_swizzling);
+ ptrdiff_t z_offset = ((y + z_image_y) * z_mt->region->pitch +
+ (x + z_image_x));
+
+ if (map_z32f_x24s8) {
+ z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
+ s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
+ } else {
+ uint32_t packed = packed_map[y * map->w + x];
+ s_map[s_offset] = packed >> 24;
+ z_map[z_offset] = packed;
+ }
+ }
+ }
+
+ intel_region_unmap(intel, s_mt->region);
+ intel_region_unmap(intel, z_mt->region);
+
+ DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
+ __FUNCTION__,
+ map->x, map->y, map->w, map->h,
+ z_mt, _mesa_get_format_name(z_mt->format),
+ map->x + z_image_x, map->y + z_image_y,
+ s_mt, map->x + s_image_x, map->y + s_image_y,
+ map->ptr, map->stride);
+ }
+
+ free(map->buffer);
+}
+
+/**
+ * Create and attach a map to the miptree at (level, slice). Return the
+ * attached map.
+ */
+static struct intel_miptree_map*
+intel_miptree_attach_map(struct intel_mipmap_tree *mt,
+ unsigned int level,
+ unsigned int slice,
+ unsigned int x,
+ unsigned int y,
+ unsigned int w,
+ unsigned int h,
+ GLbitfield mode)
+{
+ struct intel_miptree_map *map = calloc(1, sizeof(*map));
+
+ if (!map)
+ return NULL;
+
+ assert(mt->level[level].slice[slice].map == NULL);
+ mt->level[level].slice[slice].map = map;
+
+ map->mode = mode;
+ map->x = x;
+ map->y = y;
+ map->w = w;
+ map->h = h;
+
+ return map;
+}
+
+/**
+ * Release the map at (level, slice).
+ */
+static void
+intel_miptree_release_map(struct intel_mipmap_tree *mt,
+ unsigned int level,
+ unsigned int slice)
+{
+ struct intel_miptree_map **map;
+
+ map = &mt->level[level].slice[slice].map;
+ free(*map);
+ *map = NULL;
+}
+
+static void
+intel_miptree_map_singlesample(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ unsigned int level,
+ unsigned int slice,
+ unsigned int x,
+ unsigned int y,
+ unsigned int w,
+ unsigned int h,
+ GLbitfield mode,
+ void **out_ptr,
+ int *out_stride)
+{
+ struct intel_miptree_map *map;
+
+ assert(mt->num_samples <= 1);
+
+ map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
+ if (!map){
+ *out_ptr = NULL;
+ *out_stride = 0;
+ return;
+ }
+
+ intel_miptree_slice_resolve_depth(intel, mt, level, slice);
+ if (map->mode & GL_MAP_WRITE_BIT) {
+ intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
+ }
+
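+   /* Pick a mapping strategy: formats the driver fakes (W-tiled S8,
+    * wrapped ETC1, separate depth/stencil) go through a malloc'ed
+    * staging buffer; read-only maps of uncompressed X-tiled surfaces on
+    * LLC systems use a blit to a linear temporary; everything else maps
+    * through the GTT.
+    */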
+ if (mt->format == MESA_FORMAT_S8) {
+ intel_miptree_map_s8(intel, mt, map, level, slice);
+ } else if (mt->wraps_etc1) {
+ intel_miptree_map_etc1(intel, mt, map, level, slice);
+ } else if (mt->stencil_mt) {
+ intel_miptree_map_depthstencil(intel, mt, map, level, slice);
+ } else if (intel->has_llc &&
+ !(mode & GL_MAP_WRITE_BIT) &&
+ !mt->compressed &&
+ mt->region->tiling == I915_TILING_X) {
+ intel_miptree_map_blit(intel, mt, map, level, slice);
+ } else {
+ intel_miptree_map_gtt(intel, mt, map, level, slice);
+ }
+
+ *out_ptr = map->ptr;
+ *out_stride = map->stride;
+
+ if (map->ptr == NULL)
+ intel_miptree_release_map(mt, level, slice);
+}
+
+static void
+intel_miptree_unmap_singlesample(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ unsigned int level,
+ unsigned int slice)
+{
+ struct intel_miptree_map *map = mt->level[level].slice[slice].map;
+
+ assert(mt->num_samples <= 1);
+
+ if (!map)
+ return;
+
+ DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
+ mt, _mesa_get_format_name(mt->format), level, slice);
+
+ if (mt->format == MESA_FORMAT_S8) {
+ intel_miptree_unmap_s8(intel, mt, map, level, slice);
+ } else if (mt->wraps_etc1) {
+ intel_miptree_unmap_etc1(intel, mt, map, level, slice);
+ } else if (mt->stencil_mt) {
+ intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
+ } else if (map->bo) {
+ intel_miptree_unmap_blit(intel, mt, map, level, slice);
+ } else {
+ intel_miptree_unmap_gtt(intel, mt, map, level, slice);
+ }
+
+ intel_miptree_release_map(mt, level, slice);
+}
+
+static void
+intel_miptree_map_multisample(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ unsigned int level,
+ unsigned int slice,
+ unsigned int x,
+ unsigned int y,
+ unsigned int w,
+ unsigned int h,
+ GLbitfield mode,
+ void **out_ptr,
+ int *out_stride)
+{
+ struct intel_miptree_map *map;
+
+ assert(mt->num_samples > 1);
+
+ /* Only flat, renderbuffer-like miptrees are supported. */
+ if (mt->target != GL_TEXTURE_2D ||
+ mt->first_level != 0 ||
+ mt->last_level != 0) {
+ _mesa_problem(&intel->ctx, "attempt to map a multisample miptree for "
+                    "which (target, first_level, last_level) != "
+ "(GL_TEXTURE_2D, 0, 0)");
+ goto fail;
+ }
+
+ map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
+ if (!map)
+ goto fail;
+
+ if (!mt->singlesample_mt) {
+ mt->singlesample_mt =
+ intel_miptree_create_for_renderbuffer(intel,
+ mt->format,
+ mt->singlesample_width0,
+ mt->singlesample_height0,
+ 0 /*num_samples*/);
+ if (!mt->singlesample_mt)
+ goto fail;
+
+ map->singlesample_mt_is_tmp = true;
+ mt->need_downsample = true;
+ }
+
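+   /* Resolve the multisample surface into the singlesample miptree and
+    * hand out a map of that; intel_miptree_unmap_multisample() decides
+    * whether the result must be upsampled back afterward.
+    */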
+ intel_miptree_downsample(intel, mt);
+ intel_miptree_map_singlesample(intel, mt->singlesample_mt,
+ level, slice,
+ x, y, w, h,
+ mode,
+ out_ptr, out_stride);
+ return;
+
+fail:
+ intel_miptree_release_map(mt, level, slice);
+ *out_ptr = NULL;
+ *out_stride = 0;
+}
+
+static void
+intel_miptree_unmap_multisample(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ unsigned int level,
+ unsigned int slice)
+{
+ struct intel_miptree_map *map = mt->level[level].slice[slice].map;
+
+ assert(mt->num_samples > 1);
+
+ if (!map)
+ return;
+
+ intel_miptree_unmap_singlesample(intel, mt->singlesample_mt, level, slice);
+
+ mt->need_downsample = false;
+ if (map->mode & GL_MAP_WRITE_BIT)
+ intel_miptree_upsample(intel, mt);
+
+ if (map->singlesample_mt_is_tmp)
+ intel_miptree_release(&mt->singlesample_mt);
+
+ intel_miptree_release_map(mt, level, slice);
+}
+
+void
+intel_miptree_map(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ unsigned int level,
+ unsigned int slice,
+ unsigned int x,
+ unsigned int y,
+ unsigned int w,
+ unsigned int h,
+ GLbitfield mode,
+ void **out_ptr,
+ int *out_stride)
+{
+ if (mt->num_samples <= 1)
+ intel_miptree_map_singlesample(intel, mt,
+ level, slice,
+ x, y, w, h,
+ mode,
+ out_ptr, out_stride);
+ else
+ intel_miptree_map_multisample(intel, mt,
+ level, slice,
+ x, y, w, h,
+ mode,
+ out_ptr, out_stride);
+}
+
+void
+intel_miptree_unmap(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ unsigned int level,
+ unsigned int slice)
+{
+ if (mt->num_samples <= 1)
+ intel_miptree_unmap_singlesample(intel, mt, level, slice);
+ else
+ intel_miptree_unmap_multisample(intel, mt, level, slice);
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_pixel.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_pixel.c
index d733c5e8745..bc705105d4d 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_pixel.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_pixel.c
@@ -1 +1,167 @@
-../intel/intel_pixel.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/accum.h"
+#include "main/enums.h"
+#include "main/state.h"
+#include "main/bufferobj.h"
+#include "main/context.h"
+#include "swrast/swrast.h"
+
+#include "intel_context.h"
+#include "intel_pixel.h"
+#include "intel_regions.h"
+
+#define FILE_DEBUG_FLAG DEBUG_PIXEL
+
+static GLenum
+effective_func(GLenum func, bool src_alpha_is_one)
+{
+ if (src_alpha_is_one) {
+ if (func == GL_SRC_ALPHA)
+ return GL_ONE;
+ if (func == GL_ONE_MINUS_SRC_ALPHA)
+ return GL_ZERO;
+ }
+
+ return func;
+}
+
+/**
+ * Check if any fragment operations are in effect which might affect
+ * glDraw/CopyPixels.
+ */
+bool
+intel_check_blit_fragment_ops(struct gl_context * ctx, bool src_alpha_is_one)
+{
+ if (ctx->NewState)
+ _mesa_update_state(ctx);
+
+ if (ctx->FragmentProgram._Enabled) {
+ DBG("fallback due to fragment program\n");
+ return false;
+ }
+
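+   /* effective_func() folds a known source alpha of 1.0 into the blend
+    * factors; e.g. (illustrative) (GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
+    * degenerates to (GL_ONE, GL_ZERO), a no-op blend the blit path can
+    * still handle.
+    */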
+ if (ctx->Color.BlendEnabled &&
+ (effective_func(ctx->Color.Blend[0].SrcRGB, src_alpha_is_one) != GL_ONE ||
+ effective_func(ctx->Color.Blend[0].DstRGB, src_alpha_is_one) != GL_ZERO ||
+ ctx->Color.Blend[0].EquationRGB != GL_FUNC_ADD ||
+ effective_func(ctx->Color.Blend[0].SrcA, src_alpha_is_one) != GL_ONE ||
+ effective_func(ctx->Color.Blend[0].DstA, src_alpha_is_one) != GL_ZERO ||
+ ctx->Color.Blend[0].EquationA != GL_FUNC_ADD)) {
+ DBG("fallback due to blend\n");
+ return false;
+ }
+
+ if (ctx->Texture._EnabledUnits) {
+ DBG("fallback due to texturing\n");
+ return false;
+ }
+
+ if (!(ctx->Color.ColorMask[0][0] &&
+ ctx->Color.ColorMask[0][1] &&
+ ctx->Color.ColorMask[0][2] &&
+ ctx->Color.ColorMask[0][3])) {
+ DBG("fallback due to color masking\n");
+ return false;
+ }
+
+ if (ctx->Color.AlphaEnabled) {
+ DBG("fallback due to alpha\n");
+ return false;
+ }
+
+ if (ctx->Depth.Test) {
+ DBG("fallback due to depth test\n");
+ return false;
+ }
+
+ if (ctx->Fog.Enabled) {
+ DBG("fallback due to fog\n");
+ return false;
+ }
+
+ if (ctx->_ImageTransferState) {
+ DBG("fallback due to image transfer\n");
+ return false;
+ }
+
+ if (ctx->Stencil._Enabled) {
+ DBG("fallback due to image stencil\n");
+ return false;
+ }
+
+ if (ctx->RenderMode != GL_RENDER) {
+ DBG("fallback due to render mode\n");
+ return false;
+ }
+
+ return true;
+}
+
+/* The intel_region struct doesn't really do enough to capture the
+ * format of the pixels in the region. For now this code assumes that
+ * the region is a display surface and hence is either ARGB8888 or
+ * RGB565.
+ * XXX FBO: If we'd pass in the intel_renderbuffer instead of region, we'd
+ * know the buffer's pixel format.
+ *
+ * \param format as given to glDraw/ReadPixels
+ * \param type as given to glDraw/ReadPixels
+ */
+bool
+intel_check_blit_format(struct intel_region * region,
+ GLenum format, GLenum type)
+{
+ if (region->cpp == 4 &&
+ (type == GL_UNSIGNED_INT_8_8_8_8_REV ||
+ type == GL_UNSIGNED_BYTE) && format == GL_BGRA) {
+ return true;
+ }
+
+ if (region->cpp == 2 &&
+ type == GL_UNSIGNED_SHORT_5_6_5_REV && format == GL_BGR) {
+ return true;
+ }
+
+ DBG("%s: bad format for blit (cpp %d, type %s format %s)\n",
+ __FUNCTION__, region->cpp,
+ _mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
+
+ return false;
+}
+
+void
+intelInitPixelFuncs(struct dd_function_table *functions)
+{
+ functions->Accum = _mesa_accum;
+ functions->Bitmap = intelBitmap;
+ functions->CopyPixels = intelCopyPixels;
+ functions->DrawPixels = intelDrawPixels;
+ functions->ReadPixels = intelReadPixels;
+}
+
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_pixel_bitmap.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_pixel_bitmap.c
index 9085c7b0397..954dfc50b14 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_pixel_bitmap.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_pixel_bitmap.c
@@ -1 +1,340 @@
-../intel/intel_pixel_bitmap.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/glheader.h"
+#include "main/enums.h"
+#include "main/image.h"
+#include "main/colormac.h"
+#include "main/condrender.h"
+#include "main/mtypes.h"
+#include "main/macros.h"
+#include "main/pbo.h"
+#include "main/bufferobj.h"
+#include "main/state.h"
+#include "main/texobj.h"
+#include "main/context.h"
+#include "main/fbobject.h"
+#include "swrast/swrast.h"
+#include "drivers/common/meta.h"
+
+#include "intel_screen.h"
+#include "intel_context.h"
+#include "intel_batchbuffer.h"
+#include "intel_blit.h"
+#include "intel_regions.h"
+#include "intel_buffers.h"
+#include "intel_pixel.h"
+#include "intel_reg.h"
+
+
+#define FILE_DEBUG_FLAG DEBUG_PIXEL
+
+
+/* Unlike the other intel_pixel_* functions, the expectation here is
+ * that the incoming data is not in a PBO. With the XY_TEXT blit
+ * method, there's no benefit to having it in a PBO, but we could
+ * implement a path based on XY_MONO_SRC_COPY_BLIT which might benefit
+ * PBO bitmaps. I think they are probably pretty rare though - I
+ * wonder if Xgl uses them?
+ */
+static const GLubyte *map_pbo( struct gl_context *ctx,
+ GLsizei width, GLsizei height,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLubyte *bitmap )
+{
+ GLubyte *buf;
+
+ if (!_mesa_validate_pbo_access(2, unpack, width, height, 1,
+ GL_COLOR_INDEX, GL_BITMAP,
+ INT_MAX, (const GLvoid *) bitmap)) {
+ _mesa_error(ctx, GL_INVALID_OPERATION,"glBitmap(invalid PBO access)");
+ return NULL;
+ }
+
+ buf = (GLubyte *) ctx->Driver.MapBufferRange(ctx, 0, unpack->BufferObj->Size,
+ GL_MAP_READ_BIT,
+ unpack->BufferObj);
+ if (!buf) {
+ _mesa_error(ctx, GL_INVALID_OPERATION, "glBitmap(PBO is mapped)");
+ return NULL;
+ }
+
+ return ADD_POINTERS(buf, bitmap);
+}
+
+static bool test_bit( const GLubyte *src, GLuint bit )
+{
+ return (src[bit/8] & (1<<(bit % 8))) ? 1 : 0;
+}
+
+static void set_bit( GLubyte *dest, GLuint bit )
+{
+ dest[bit/8] |= 1 << (bit % 8);
+}
+
+/* Extract a rectangle's worth of data from the bitmap. Called
+ * per chunk of HW-sized bitmap.
+ */
+static GLuint get_bitmap_rect(GLsizei width, GLsizei height,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLubyte *bitmap,
+ GLuint x, GLuint y,
+ GLuint w, GLuint h,
+ GLubyte *dest,
+ GLuint row_align,
+ bool invert)
+{
+ GLuint src_offset = (x + unpack->SkipPixels) & 0x7;
+ GLuint mask = unpack->LsbFirst ? 0 : 7;
+ GLuint bit = 0;
+ GLint row, col;
+ GLint first, last;
+ GLint incr;
+ GLuint count = 0;
+
+ DBG("%s %d,%d %dx%d bitmap %dx%d skip %d src_offset %d mask %d\n",
+ __FUNCTION__, x,y,w,h,width,height,unpack->SkipPixels, src_offset, mask);
+
+ if (invert) {
+ first = h-1;
+ last = 0;
+ incr = -1;
+ }
+ else {
+ first = 0;
+ last = h-1;
+ incr = 1;
+ }
+
+ /* Require that dest be pre-zero'd.
+ */
+ for (row = first; row != (last+incr); row += incr) {
+ const GLubyte *rowsrc = _mesa_image_address2d(unpack, bitmap,
+ width, height,
+ GL_COLOR_INDEX, GL_BITMAP,
+ y + row, x);
+
+ for (col = 0; col < w; col++, bit++) {
+ if (test_bit(rowsrc, (col + src_offset) ^ mask)) {
+ set_bit(dest, bit ^ 7);
+ count++;
+ }
+ }
+
+ if (row_align)
+ bit = ALIGN(bit, row_align);
+ }
+
+ return count;
+}
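+
+/* Worked example (illustrative, not from the original source): with
+ * unpack->LsbFirst false, mask == 7, so source bits are read MSB-first
+ * within each byte, as GL_BITMAP's default unpacking requires; set_bit's
+ * "bit ^ 7" then stores them MSB-first in the destination chunk as well.
+ * With row_align == 8, as passed by do_blit_bitmap() below, every output
+ * row starts on a byte boundary.
+ */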
+
+/**
+ * Returns the low Y value of the given vertical range, flipped according to
+ * whether or not the framebuffer is a window-system framebuffer.
+ */
+static INLINE int
+y_flip(struct gl_framebuffer *fb, int y, int height)
+{
+ if (_mesa_is_user_fbo(fb))
+ return y;
+ else
+ return fb->Height - y - height;
+}
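+
+/* Example (illustrative): for a window-system framebuffer with Height 100,
+ * y_flip(fb, 10, 20) == 70: a 20-pixel-high span 10 pixels above the GL
+ * bottom-left origin lands 70 pixels below the hardware's top-left origin.
+ */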
+
+/*
+ * Render a bitmap.
+ */
+static bool
+do_blit_bitmap( struct gl_context *ctx,
+ GLint dstx, GLint dsty,
+ GLsizei width, GLsizei height,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLubyte *bitmap )
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_region *dst;
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
+ GLfloat tmpColor[4];
+ GLubyte ubcolor[4];
+ GLuint color;
+ GLsizei bitmap_width = width;
+ GLsizei bitmap_height = height;
+ GLint px, py;
+ GLuint stipple[32];
+ GLint orig_dstx = dstx;
+ GLint orig_dsty = dsty;
+
+ /* Update draw buffer bounds */
+ _mesa_update_state(ctx);
+
+ if (ctx->Depth.Test) {
+ /* The blit path produces incorrect results when depth testing is on.
+ * It seems the blit Z coord is always 1.0 (the far plane) so fragments
+ * will likely be obscured by other, closer geometry.
+ */
+ return false;
+ }
+
+ intel_prepare_render(intel);
+ dst = intel_drawbuf_region(intel);
+
+ if (!dst)
+ return false;
+
+ if (_mesa_is_bufferobj(unpack->BufferObj)) {
+ bitmap = map_pbo(ctx, width, height, unpack, bitmap);
+ if (bitmap == NULL)
+ return true; /* even though this is an error, we're done */
+ }
+
+ COPY_4V(tmpColor, ctx->Current.RasterColor);
+
+ if (_mesa_need_secondary_color(ctx)) {
+ ADD_3V(tmpColor, tmpColor, ctx->Current.RasterSecondaryColor);
+ }
+
+ UNCLAMPED_FLOAT_TO_UBYTE(ubcolor[0], tmpColor[0]);
+ UNCLAMPED_FLOAT_TO_UBYTE(ubcolor[1], tmpColor[1]);
+ UNCLAMPED_FLOAT_TO_UBYTE(ubcolor[2], tmpColor[2]);
+ UNCLAMPED_FLOAT_TO_UBYTE(ubcolor[3], tmpColor[3]);
+
+ if (dst->cpp == 2)
+ color = PACK_COLOR_565(ubcolor[0], ubcolor[1], ubcolor[2]);
+ else
+ color = PACK_COLOR_8888(ubcolor[3], ubcolor[0], ubcolor[1], ubcolor[2]);
+
+ if (!intel_check_blit_fragment_ops(ctx, tmpColor[3] == 1.0F))
+ return false;
+
+ /* Clip to buffer bounds and scissor. */
+ if (!_mesa_clip_to_region(fb->_Xmin, fb->_Ymin,
+ fb->_Xmax, fb->_Ymax,
+ &dstx, &dsty, &width, &height))
+ goto out;
+
+ dsty = y_flip(fb, dsty, height);
+
+#define DY 32
+#define DX 32
+
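+   /* Worked example (illustrative): a 100x40 bitmap is cut into a 4x2 grid
+    * of chunks below.  For a full 32x32 chunk, sz = ALIGN(ALIGN(32,8)*32,
+    * 64)/8 == 128 bytes, exactly the capacity of the stipple[] array above.
+    */
+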
+ /* Chop it all into chunks that can be digested by hardware: */
+ for (py = 0; py < height; py += DY) {
+ for (px = 0; px < width; px += DX) {
+ int h = MIN2(DY, height - py);
+ int w = MIN2(DX, width - px);
+ GLuint sz = ALIGN(ALIGN(w,8) * h, 64)/8;
+ GLenum logic_op = ctx->Color.ColorLogicOpEnabled ?
+ ctx->Color.LogicOp : GL_COPY;
+
+ assert(sz <= sizeof(stipple));
+ memset(stipple, 0, sz);
+
+ /* May need to adjust this when padding has been introduced in
+ * sz above:
+ *
+ * Have to translate destination coordinates back into source
+ * coordinates.
+ */
+ if (get_bitmap_rect(bitmap_width, bitmap_height, unpack,
+ bitmap,
+ -orig_dstx + (dstx + px),
+ -orig_dsty + y_flip(fb, dsty + py, h),
+ w, h,
+ (GLubyte *)stipple,
+ 8,
+ _mesa_is_winsys_fbo(fb)) == 0)
+ continue;
+
+ if (!intelEmitImmediateColorExpandBlit(intel,
+ dst->cpp,
+ (GLubyte *)stipple,
+ sz,
+ color,
+ dst->pitch,
+ dst->bo,
+ 0,
+ dst->tiling,
+ dstx + px,
+ dsty + py,
+ w, h,
+ logic_op)) {
+ return false;
+ }
+ }
+ }
+out:
+
+ if (unlikely(INTEL_DEBUG & DEBUG_SYNC))
+ intel_batchbuffer_flush(intel);
+
+ if (_mesa_is_bufferobj(unpack->BufferObj)) {
+ /* done with PBO so unmap it now */
+ ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj);
+ }
+
+ intel_check_front_buffer_rendering(intel);
+
+ return true;
+}
+
+
+/* There are a large number of possible ways to implement bitmap on
+ * this hardware, most of them have some sort of drawback. Here are a
+ * few that spring to mind:
+ *
+ * Blit:
+ * - XY_MONO_SRC_BLT_CMD
+ * - use XY_SETUP_CLIP_BLT for cliprect clipping.
+ * - XY_TEXT_BLT
+ * - XY_TEXT_IMMEDIATE_BLT
+ * - blit per cliprect, subject to maximum immediate data size.
+ * - XY_COLOR_BLT
+ * - per pixel or run of pixels
+ * - XY_PIXEL_BLT
+ * - good for sparse bitmaps
+ *
+ * 3D engine:
+ * - Point per pixel
+ * - Translate bitmap to an alpha texture and render as a quad
+ * - Chop bitmap up into 32x32 squares and render w/polygon stipple.
+ */
+void
+intelBitmap(struct gl_context * ctx,
+ GLint x, GLint y,
+ GLsizei width, GLsizei height,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLubyte * pixels)
+{
+ if (!_mesa_check_conditional_render(ctx))
+ return;
+
+ if (do_blit_bitmap(ctx, x, y, width, height,
+ unpack, pixels))
+ return;
+
+ _mesa_meta_Bitmap(ctx, x, y, width, height, unpack, pixels);
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_pixel_copy.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_pixel_copy.c
index ee433605904..682a52d97d2 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_pixel_copy.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_pixel_copy.c
@@ -1 +1,230 @@
-../intel/intel_pixel_copy.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/glheader.h"
+#include "main/image.h"
+#include "main/state.h"
+#include "main/mtypes.h"
+#include "main/condrender.h"
+#include "main/fbobject.h"
+#include "drivers/common/meta.h"
+
+#include "intel_context.h"
+#include "intel_buffers.h"
+#include "intel_mipmap_tree.h"
+#include "intel_regions.h"
+#include "intel_pixel.h"
+#include "intel_fbo.h"
+
+#define FILE_DEBUG_FLAG DEBUG_PIXEL
+
+/**
+ * Check if any fragment operations are in effect which might affect
+ * glCopyPixels. Differs from intel_check_blit_fragment_ops in that
+ * we allow Scissor.
+ */
+static bool
+intel_check_copypixel_blit_fragment_ops(struct gl_context * ctx)
+{
+ if (ctx->NewState)
+ _mesa_update_state(ctx);
+
+ /* Could do logicop with the blitter:
+ */
+ return !(ctx->_ImageTransferState ||
+ ctx->Color.AlphaEnabled ||
+ ctx->Depth.Test ||
+ ctx->Fog.Enabled ||
+ ctx->Stencil._Enabled ||
+ !ctx->Color.ColorMask[0][0] ||
+ !ctx->Color.ColorMask[0][1] ||
+ !ctx->Color.ColorMask[0][2] ||
+ !ctx->Color.ColorMask[0][3] ||
+ ctx->Texture._EnabledUnits ||
+ ctx->FragmentProgram._Enabled ||
+ ctx->Color.BlendEnabled);
+}
+
+
+/**
+ * CopyPixels with the blitter. Don't support zooming, pixel transfer, etc.
+ */
+static bool
+do_blit_copypixels(struct gl_context * ctx,
+ GLint srcx, GLint srcy,
+ GLsizei width, GLsizei height,
+ GLint dstx, GLint dsty, GLenum type)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
+ struct gl_framebuffer *read_fb = ctx->ReadBuffer;
+ GLint orig_dstx;
+ GLint orig_dsty;
+ GLint orig_srcx;
+ GLint orig_srcy;
+ bool flip = false;
+ struct intel_renderbuffer *draw_irb = NULL;
+ struct intel_renderbuffer *read_irb = NULL;
+ gl_format read_format, draw_format;
+
+ /* Update draw buffer bounds */
+ _mesa_update_state(ctx);
+
+ switch (type) {
+ case GL_COLOR:
+ if (fb->_NumColorDrawBuffers != 1) {
+ fallback_debug("glCopyPixels() fallback: MRT\n");
+ return false;
+ }
+
+ draw_irb = intel_renderbuffer(fb->_ColorDrawBuffers[0]);
+ read_irb = intel_renderbuffer(read_fb->_ColorReadBuffer);
+ break;
+ case GL_DEPTH_STENCIL_EXT:
+ draw_irb = intel_renderbuffer(fb->Attachment[BUFFER_DEPTH].Renderbuffer);
+ read_irb =
+ intel_renderbuffer(read_fb->Attachment[BUFFER_DEPTH].Renderbuffer);
+ break;
+ case GL_DEPTH:
+ fallback_debug("glCopyPixels() fallback: GL_DEPTH\n");
+ return false;
+ case GL_STENCIL:
+ fallback_debug("glCopyPixels() fallback: GL_STENCIL\n");
+ return false;
+ default:
+ fallback_debug("glCopyPixels(): Unknown type\n");
+ return false;
+ }
+
+ if (!draw_irb) {
+ fallback_debug("glCopyPixels() fallback: missing draw buffer\n");
+ return false;
+ }
+
+ if (!read_irb) {
+ fallback_debug("glCopyPixels() fallback: missing read buffer\n");
+ return false;
+ }
+
+ read_format = intel_rb_format(read_irb);
+ draw_format = intel_rb_format(draw_irb);
+
+ if (draw_format != read_format &&
+ !(draw_format == MESA_FORMAT_XRGB8888 &&
+ read_format == MESA_FORMAT_ARGB8888)) {
+ fallback_debug("glCopyPixels() fallback: mismatched formats (%s -> %s\n",
+ _mesa_get_format_name(read_format),
+ _mesa_get_format_name(draw_format));
+ return false;
+ }
+
+   /* CopyPixels can be more than a straight copy.  Ensure all the
+ * extra operations are disabled:
+ */
+ if (!intel_check_copypixel_blit_fragment_ops(ctx) ||
+ ctx->Pixel.ZoomX != 1.0F || ctx->Pixel.ZoomY != 1.0F)
+ return false;
+
+ intel_prepare_render(intel);
+
+ intel_flush(&intel->ctx);
+
+ /* Clip to destination buffer. */
+ orig_dstx = dstx;
+ orig_dsty = dsty;
+ if (!_mesa_clip_to_region(fb->_Xmin, fb->_Ymin,
+ fb->_Xmax, fb->_Ymax,
+ &dstx, &dsty, &width, &height))
+ goto out;
+ /* Adjust src coords for our post-clipped destination origin */
+ srcx += dstx - orig_dstx;
+ srcy += dsty - orig_dsty;
+
+ /* Clip to source buffer. */
+ orig_srcx = srcx;
+ orig_srcy = srcy;
+ if (!_mesa_clip_to_region(0, 0,
+ read_fb->Width, read_fb->Height,
+ &srcx, &srcy, &width, &height))
+ goto out;
+ /* Adjust dst coords for our post-clipped source origin */
+ dstx += srcx - orig_srcx;
+ dsty += srcy - orig_srcy;
+
+ /* Flip dest Y if it's a window system framebuffer. */
+ if (_mesa_is_winsys_fbo(fb)) {
+ /* copypixels to a window system framebuffer */
+ dsty = fb->Height - dsty - height;
+ flip = !flip;
+ }
+
+ /* Flip source Y if it's a window system framebuffer. */
+ if (_mesa_is_winsys_fbo(read_fb)) {
+ srcy = read_fb->Height - srcy - height;
+ flip = !flip;
+ }
+
+ srcx += read_irb->draw_x;
+ srcy += read_irb->draw_y;
+ dstx += draw_irb->draw_x;
+ dsty += draw_irb->draw_y;
+
+ if (!intel_region_copy(intel,
+ draw_irb->mt->region, 0, dstx, dsty,
+ read_irb->mt->region, 0, srcx, srcy,
+ width, height, flip,
+ ctx->Color.ColorLogicOpEnabled ?
+ ctx->Color.LogicOp : GL_COPY)) {
+ DBG("%s: blit failure\n", __FUNCTION__);
+ return false;
+ }
+
+out:
+ intel_check_front_buffer_rendering(intel);
+
+ DBG("%s: success\n", __FUNCTION__);
+ return true;
+}
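+
+/* Worked example of the two clips above (illustrative): copying a 100x100
+ * rect to dst (-20, 0) clips dstx up to 0 and width down to 80, and srcx is
+ * then advanced by the same 20 pixels, so the pixels that survive clipping
+ * still pair up one-to-one between source and destination.
+ */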
+
+
+void
+intelCopyPixels(struct gl_context * ctx,
+ GLint srcx, GLint srcy,
+ GLsizei width, GLsizei height,
+ GLint destx, GLint desty, GLenum type)
+{
+ DBG("%s\n", __FUNCTION__);
+
+ if (!_mesa_check_conditional_render(ctx))
+ return;
+
+ if (do_blit_copypixels(ctx, srcx, srcy, width, height, destx, desty, type))
+ return;
+
+ /* this will use swrast if needed */
+ _mesa_meta_CopyPixels(ctx, srcx, srcy, width, height, destx, desty, type);
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_pixel_draw.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_pixel_draw.c
index 8431a24edfc..2ec7ed8e269 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_pixel_draw.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_pixel_draw.c
@@ -1 +1,58 @@
-../intel/intel_pixel_draw.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/glheader.h"
+#include "main/enums.h"
+#include "main/image.h"
+#include "main/mtypes.h"
+#include "main/teximage.h"
+#include "main/texobj.h"
+#include "main/texstate.h"
+#include "swrast/swrast.h"
+#include "drivers/common/meta.h"
+
+#include "intel_context.h"
+#include "intel_pixel.h"
+
+void
+intelDrawPixels(struct gl_context * ctx,
+ GLint x, GLint y,
+ GLsizei width, GLsizei height,
+ GLenum format,
+ GLenum type,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLvoid * pixels)
+{
+ if (format == GL_STENCIL_INDEX) {
+ _swrast_DrawPixels(ctx, x, y, width, height, format, type,
+ unpack, pixels);
+ return;
+ }
+
+ _mesa_meta_DrawPixels(ctx, x, y, width, height, format, type,
+ unpack, pixels);
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_pixel_read.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_pixel_read.c
index cc4589f4d42..ab4e581c400 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_pixel_read.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_pixel_read.c
@@ -1 +1,205 @@
-../intel/intel_pixel_read.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/glheader.h"
+#include "main/enums.h"
+#include "main/mtypes.h"
+#include "main/macros.h"
+#include "main/fbobject.h"
+#include "main/image.h"
+#include "main/bufferobj.h"
+#include "main/readpix.h"
+#include "main/state.h"
+
+#include "intel_screen.h"
+#include "intel_context.h"
+#include "intel_blit.h"
+#include "intel_buffers.h"
+#include "intel_regions.h"
+#include "intel_pixel.h"
+#include "intel_buffer_objects.h"
+
+#define FILE_DEBUG_FLAG DEBUG_PIXEL
+
+/* For many applications, the new ability to pull the source buffers
+ * back out of the GTT and then do the packing/conversion operations
+ * in software will be as much of an improvement as trying to get the
+ * blitter and/or texture engine to do the work.
+ *
+ * This step is gated on private backbuffers.
+ *
+ * Obviously the frontbuffer can't be pulled back, so that is either
+ * an argument for blit/texture readpixels, or for blitting to a
+ * temporary and then pulling that back.
+ *
+ * When the destination is a pbo, however, it's not clear if it is
+ * ever going to be pulled to main memory (though the access param
+ * will be a good hint). So it sounds like we do want to be able to
+ * choose between blit/texture implementation on the gpu and pullback
+ * and cpu-based copying.
+ *
+ * Unless you can magically turn client memory into a PBO for the
+ * duration of this call, there will be a cpu-based copying step in
+ * any case.
+ */
+
+static bool
+do_blit_readpixels(struct gl_context * ctx,
+ GLint x, GLint y, GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_region *src = intel_readbuf_region(intel);
+ struct intel_buffer_object *dst = intel_buffer_object(pack->BufferObj);
+ GLuint dst_offset;
+ GLuint rowLength;
+ drm_intel_bo *dst_buffer;
+ bool all;
+ GLint dst_x, dst_y;
+ GLuint dirty;
+
+ DBG("%s\n", __FUNCTION__);
+
+ if (!src)
+ return false;
+
+ if (!_mesa_is_bufferobj(pack->BufferObj)) {
+ /* PBO only for now:
+ */
+ DBG("%s - not PBO\n", __FUNCTION__);
+ return false;
+ }
+
+
+ if (ctx->_ImageTransferState ||
+ !intel_check_blit_format(src, format, type)) {
+ DBG("%s - bad format for blit\n", __FUNCTION__);
+ return false;
+ }
+
+ if (pack->Alignment != 1 || pack->SwapBytes || pack->LsbFirst) {
+ DBG("%s: bad packing params\n", __FUNCTION__);
+ return false;
+ }
+
+ if (pack->RowLength > 0)
+ rowLength = pack->RowLength;
+ else
+ rowLength = width;
+
+ if (pack->Invert) {
+ DBG("%s: MESA_PACK_INVERT not done yet\n", __FUNCTION__);
+ return false;
+ }
+ else {
+ if (_mesa_is_winsys_fbo(ctx->ReadBuffer))
+ rowLength = -rowLength;
+ }
+
+ dst_offset = (GLintptr)pixels;
+ dst_offset += _mesa_image_offset(2, pack, width, height,
+ format, type, 0, 0, 0);
+
+ if (!_mesa_clip_copytexsubimage(ctx,
+ &dst_x, &dst_y,
+ &x, &y,
+ &width, &height)) {
+ return true;
+ }
+
+ dirty = intel->front_buffer_dirty;
+ intel_prepare_render(intel);
+ intel->front_buffer_dirty = dirty;
+
+ all = (width * height * src->cpp == dst->Base.Size &&
+ x == 0 && dst_offset == 0);
+
+ dst_x = 0;
+ dst_y = 0;
+
+ dst_buffer = intel_bufferobj_buffer(intel, dst,
+ all ? INTEL_WRITE_FULL :
+ INTEL_WRITE_PART);
+
+ if (_mesa_is_winsys_fbo(ctx->ReadBuffer))
+ y = ctx->ReadBuffer->Height - (y + height);
+
+ if (!intelEmitCopyBlit(intel,
+ src->cpp,
+ src->pitch, src->bo, 0, src->tiling,
+ rowLength, dst_buffer, dst_offset, false,
+ x, y,
+ dst_x, dst_y,
+ width, height,
+ GL_COPY)) {
+ return false;
+ }
+
+ DBG("%s - DONE\n", __FUNCTION__);
+
+ return true;
+}
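+
+/* Sketch of a client call that can take the blit path above (hypothetical
+ * GL usage, assuming an ARGB8888 window-system read buffer):
+ *
+ *    glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo);
+ *    glPixelStorei(GL_PACK_ALIGNMENT, 1);
+ *    glReadPixels(0, 0, w, h, GL_BGRA, GL_UNSIGNED_BYTE, (void *) 0);
+ *
+ * A non-PBO destination, pixel transfer state, MESA_PACK_INVERT or unusual
+ * packing state all fall through to the swrast path in intelReadPixels().
+ */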
+
+void
+intelReadPixels(struct gl_context * ctx,
+ GLint x, GLint y, GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
+{
+ struct intel_context *intel = intel_context(ctx);
+ bool dirty;
+
+ intel_flush_rendering_to_batch(ctx);
+
+ DBG("%s\n", __FUNCTION__);
+
+ if (do_blit_readpixels
+ (ctx, x, y, width, height, format, type, pack, pixels))
+ return;
+
+   /* glReadPixels() won't dirty the front buffer, so reset the dirty
+ * flag after calling intel_prepare_render(). */
+ dirty = intel->front_buffer_dirty;
+ intel_prepare_render(intel);
+ intel->front_buffer_dirty = dirty;
+
+ fallback_debug("%s: fallback to swrast\n", __FUNCTION__);
+
+ /* Update Mesa state before calling _mesa_readpixels().
+ * XXX this may not be needed since ReadPixels no longer uses the
+ * span code.
+ */
+
+ if (ctx->NewState)
+ _mesa_update_state(ctx);
+
+ _mesa_readpixels(ctx, x, y, width, height, format, type, pack, pixels);
+
+ /* There's an intel_prepare_render() call in intelSpanRenderStart(). */
+ intel->front_buffer_dirty = dirty;
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_regions.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_regions.c
index 89b2f15c10f..7cb008c2f71 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_regions.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_regions.c
@@ -1 +1,473 @@
-../intel/intel_regions.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Provide additional functionality on top of bufmgr buffers:
+ * - 2d semantics and blit operations
+ * - refcounting of buffers for multiple images in a buffer.
+ * - refcounting of buffer mappings.
+ * - some logic for moving the buffers to the best memory pools for
+ * given operations.
+ *
+ * Most of this is to make it easier to implement the fixed-layout
+ * mipmap tree required by intel hardware in the face of GL's
+ * programming interface where each image can be specified in random
+ * order and it isn't clear what layout the tree should have until the
+ * last moment.
+ */
+
+#include <sys/ioctl.h>
+#include <errno.h>
+
+#include "main/hash.h"
+#include "intel_context.h"
+#include "intel_regions.h"
+#include "intel_blit.h"
+#include "intel_buffer_objects.h"
+#include "intel_bufmgr.h"
+#include "intel_batchbuffer.h"
+
+#define FILE_DEBUG_FLAG DEBUG_REGION
+
+/* This should be set to the maximum backtrace size desired.
+ * Set it to 0 to disable backtrace debugging.
+ */
+#define DEBUG_BACKTRACE_SIZE 0
+
+#if DEBUG_BACKTRACE_SIZE == 0
+/* Use the standard debug output */
+#define _DBG(...) DBG(__VA_ARGS__)
+#else
+/* Use backtracing debug output */
+#define _DBG(...) {debug_backtrace(); DBG(__VA_ARGS__);}
+
+/* Backtracing debug support */
+#include <execinfo.h>
+
+static void
+debug_backtrace(void)
+{
+ void *trace[DEBUG_BACKTRACE_SIZE];
+ char **strings = NULL;
+ int traceSize;
+ register int i;
+
+ traceSize = backtrace(trace, DEBUG_BACKTRACE_SIZE);
+ strings = backtrace_symbols(trace, traceSize);
+ if (strings == NULL) {
+ DBG("no backtrace:");
+ return;
+ }
+
+ /* Spit out all the strings with a colon separator. Ignore
+ * the first, since we don't really care about the call
+ * to debug_backtrace() itself. Skip until the final "/" in
+ * the trace to avoid really long lines.
+ */
+ for (i = 1; i < traceSize; i++) {
+ char *p = strings[i], *slash = strings[i];
+ while (*p) {
+ if (*p++ == '/') {
+ slash = p;
+ }
+ }
+
+ DBG("%s:", slash);
+ }
+
+ /* Free up the memory, and we're done */
+ free(strings);
+}
+
+#endif
+
+
+
+/* XXX: Thread safety?
+ */
+void *
+intel_region_map(struct intel_context *intel, struct intel_region *region,
+ GLbitfield mode)
+{
+ /* We have the region->map_refcount controlling mapping of the BO because
+ * in software fallbacks we may end up mapping the same buffer multiple
+ * times on Mesa's behalf, so we refcount our mappings to make sure that
+ * the pointer stays valid until the end of the unmap chain. However, we
+ * must not emit any batchbuffers between the start of mapping and the end
+ * of unmapping, or further use of the map will be incoherent with the GPU
+ * rendering done by that batchbuffer. Hence we assert in
+ * intel_batchbuffer_flush() that that doesn't happen, which means that the
+ * flush is only needed on first map of the buffer.
+ */
+
+ if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
+ if (drm_intel_bo_busy(region->bo)) {
+ perf_debug("Mapping a busy BO, causing a stall on the GPU.\n");
+ }
+ }
+
+ _DBG("%s %p\n", __FUNCTION__, region);
+ if (!region->map_refcount) {
+ intel_flush(&intel->ctx);
+
+ if (region->tiling != I915_TILING_NONE)
+ drm_intel_gem_bo_map_gtt(region->bo);
+ else
+ drm_intel_bo_map(region->bo, true);
+
+ region->map = region->bo->virtual;
+ }
+ if (region->map) {
+ intel->num_mapped_regions++;
+ region->map_refcount++;
+ }
+
+ return region->map;
+}
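+
+/* Typical map/unmap pairing (illustrative sketch; "out" and "nbytes" are
+ * hypothetical).  No batchbuffer may be flushed between the two calls:
+ *
+ *    void *ptr = intel_region_map(intel, region, GL_MAP_READ_BIT);
+ *    if (ptr) {
+ *       memcpy(out, ptr, nbytes);
+ *       intel_region_unmap(intel, region);
+ *    }
+ */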
+
+void
+intel_region_unmap(struct intel_context *intel, struct intel_region *region)
+{
+ _DBG("%s %p\n", __FUNCTION__, region);
+ if (!--region->map_refcount) {
+ if (region->tiling != I915_TILING_NONE)
+ drm_intel_gem_bo_unmap_gtt(region->bo);
+ else
+ drm_intel_bo_unmap(region->bo);
+
+ region->map = NULL;
+ --intel->num_mapped_regions;
+ assert(intel->num_mapped_regions >= 0);
+ }
+}
+
+static struct intel_region *
+intel_region_alloc_internal(struct intel_screen *screen,
+ GLuint cpp,
+ GLuint width, GLuint height, GLuint pitch,
+ uint32_t tiling, drm_intel_bo *buffer)
+{
+ struct intel_region *region;
+
+ region = calloc(sizeof(*region), 1);
+ if (region == NULL)
+ return region;
+
+ region->cpp = cpp;
+ region->width = width;
+ region->height = height;
+ region->pitch = pitch;
+ region->refcount = 1;
+ region->bo = buffer;
+ region->tiling = tiling;
+ region->screen = screen;
+
+ _DBG("%s <-- %p\n", __FUNCTION__, region);
+ return region;
+}
+
+struct intel_region *
+intel_region_alloc(struct intel_screen *screen,
+ uint32_t tiling,
+ GLuint cpp, GLuint width, GLuint height,
+ bool expect_accelerated_upload)
+{
+ drm_intel_bo *buffer;
+ unsigned long flags = 0;
+ unsigned long aligned_pitch;
+ struct intel_region *region;
+
+ if (expect_accelerated_upload)
+ flags |= BO_ALLOC_FOR_RENDER;
+
+ buffer = drm_intel_bo_alloc_tiled(screen->bufmgr, "region",
+ width, height, cpp,
+ &tiling, &aligned_pitch, flags);
+ if (buffer == NULL)
+ return NULL;
+
+ region = intel_region_alloc_internal(screen, cpp, width, height,
+ aligned_pitch / cpp, tiling, buffer);
+ if (region == NULL) {
+ drm_intel_bo_unreference(buffer);
+ return NULL;
+ }
+
+ return region;
+}
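+
+/* Example (illustrative): allocating a 256x256 RGBA surface, X-tiled, that
+ * we expect to fill with the blitter rather than the CPU:
+ *
+ *    struct intel_region *r =
+ *       intel_region_alloc(screen, I915_TILING_X, 4, 256, 256, true);
+ */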
+
+bool
+intel_region_flink(struct intel_region *region, uint32_t *name)
+{
+ if (region->name == 0) {
+ if (drm_intel_bo_flink(region->bo, &region->name))
+ return false;
+
+ _mesa_HashInsert(region->screen->named_regions,
+ region->name, region);
+ }
+
+ *name = region->name;
+
+ return true;
+}
+
+struct intel_region *
+intel_region_alloc_for_handle(struct intel_screen *screen,
+ GLuint cpp,
+ GLuint width, GLuint height, GLuint pitch,
+ GLuint handle, const char *name)
+{
+ struct intel_region *region, *dummy;
+ drm_intel_bo *buffer;
+ int ret;
+ uint32_t bit_6_swizzle, tiling;
+
+ region = _mesa_HashLookup(screen->named_regions, handle);
+ if (region != NULL) {
+ dummy = NULL;
+ if (region->width != width || region->height != height ||
+ region->cpp != cpp || region->pitch != pitch) {
+ fprintf(stderr,
+ "Region for name %d already exists but is not compatible\n",
+ handle);
+ return NULL;
+ }
+ intel_region_reference(&dummy, region);
+ return dummy;
+ }
+
+ buffer = intel_bo_gem_create_from_name(screen->bufmgr, name, handle);
+ if (buffer == NULL)
+ return NULL;
+ ret = drm_intel_bo_get_tiling(buffer, &tiling, &bit_6_swizzle);
+ if (ret != 0) {
+ fprintf(stderr, "Couldn't get tiling of buffer %d (%s): %s\n",
+ handle, name, strerror(-ret));
+ drm_intel_bo_unreference(buffer);
+ return NULL;
+ }
+
+ region = intel_region_alloc_internal(screen, cpp,
+ width, height, pitch, tiling, buffer);
+ if (region == NULL) {
+ drm_intel_bo_unreference(buffer);
+ return NULL;
+ }
+
+ region->name = handle;
+ _mesa_HashInsert(screen->named_regions, handle, region);
+
+ return region;
+}
+
+void
+intel_region_reference(struct intel_region **dst, struct intel_region *src)
+{
+ _DBG("%s: %p(%d) -> %p(%d)\n", __FUNCTION__,
+ *dst, *dst ? (*dst)->refcount : 0, src, src ? src->refcount : 0);
+
+ if (src != *dst) {
+ if (*dst)
+ intel_region_release(dst);
+
+ if (src)
+ src->refcount++;
+ *dst = src;
+ }
+}
+
+void
+intel_region_release(struct intel_region **region_handle)
+{
+ struct intel_region *region = *region_handle;
+
+ if (region == NULL) {
+ _DBG("%s NULL\n", __FUNCTION__);
+ return;
+ }
+
+ _DBG("%s %p %d\n", __FUNCTION__, region, region->refcount - 1);
+
+ ASSERT(region->refcount > 0);
+ region->refcount--;
+
+ if (region->refcount == 0) {
+ assert(region->map_refcount == 0);
+
+ drm_intel_bo_unreference(region->bo);
+
+ if (region->name > 0)
+ _mesa_HashRemove(region->screen->named_regions, region->name);
+
+ free(region);
+ }
+ *region_handle = NULL;
+}
+
+/*
+ * XXX Move this into core Mesa?
+ */
+void
+_mesa_copy_rect(GLubyte * dst,
+ GLuint cpp,
+ GLuint dst_pitch,
+ GLuint dst_x,
+ GLuint dst_y,
+ GLuint width,
+ GLuint height,
+ const GLubyte * src,
+ GLuint src_pitch, GLuint src_x, GLuint src_y)
+{
+ GLuint i;
+
+ dst_pitch *= cpp;
+ src_pitch *= cpp;
+ dst += dst_x * cpp;
+ src += src_x * cpp;
+ dst += dst_y * dst_pitch;
+ src += src_y * src_pitch;
+ width *= cpp;
+
+ if (width == dst_pitch && width == src_pitch)
+ memcpy(dst, src, height * width);
+ else {
+ for (i = 0; i < height; i++) {
+ memcpy(dst, src, width);
+ dst += dst_pitch;
+ src += src_pitch;
+ }
+ }
+}
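+
+/* Note (illustrative): when both pitches equal the row width in bytes (for
+ * example, copying a whole tightly-packed image) the loop collapses into a
+ * single memcpy of height * width bytes; any sub-rectangle or padded pitch
+ * takes the per-row path instead.
+ */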
+
+/* Copy rectangular sub-regions. Need better logic about when to
+ * push buffers into AGP - will currently do so whenever possible.
+ */
+bool
+intel_region_copy(struct intel_context *intel,
+ struct intel_region *dst,
+ GLuint dst_offset,
+ GLuint dstx, GLuint dsty,
+ struct intel_region *src,
+ GLuint src_offset,
+ GLuint srcx, GLuint srcy, GLuint width, GLuint height,
+ bool flip,
+ GLenum logicop)
+{
+ uint32_t src_pitch = src->pitch;
+
+ _DBG("%s\n", __FUNCTION__);
+
+ if (intel == NULL)
+ return false;
+
+ assert(src->cpp == dst->cpp);
+
+ if (flip)
+ src_pitch = -src_pitch;
+
+ return intelEmitCopyBlit(intel,
+ dst->cpp,
+ src_pitch, src->bo, src_offset, src->tiling,
+ dst->pitch, dst->bo, dst_offset, dst->tiling,
+ srcx, srcy, dstx, dsty, width, height,
+ logicop);
+}
+
+/**
+ * This function computes masks that may be used to select the bits of the X
+ * and Y coordinates that indicate the offset within a tile. If the region is
+ * untiled, the masks are set to 0.
+ */
+void
+intel_region_get_tile_masks(struct intel_region *region,
+ uint32_t *mask_x, uint32_t *mask_y,
+ bool map_stencil_as_y_tiled)
+{
+ int cpp = region->cpp;
+ uint32_t tiling = region->tiling;
+
+ if (map_stencil_as_y_tiled)
+ tiling = I915_TILING_Y;
+
+ switch (tiling) {
+ default:
+ assert(false);
+ case I915_TILING_NONE:
+ *mask_x = *mask_y = 0;
+ break;
+ case I915_TILING_X:
+ *mask_x = 512 / cpp - 1;
+ *mask_y = 7;
+ break;
+ case I915_TILING_Y:
+ *mask_x = 128 / cpp - 1;
+ *mask_y = 31;
+ break;
+ }
+}
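+
+/* Example (illustrative): for an X-tiled region with cpp == 4 this yields
+ * mask_x == 127 and mask_y == 7, so a caller can split a coordinate into an
+ * intra-tile part (x & mask_x, y & mask_y) and a tile-aligned part
+ * (x & ~mask_x, y & ~mask_y) suitable for intel_region_get_aligned_offset()
+ * below.
+ */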
+
+/**
+ * Compute the offset (in bytes) from the start of the region to the given x
+ * and y coordinate. For tiled regions, caller must ensure that x and y are
+ * multiples of the tile size.
+ */
+uint32_t
+intel_region_get_aligned_offset(struct intel_region *region, uint32_t x,
+ uint32_t y, bool map_stencil_as_y_tiled)
+{
+ int cpp = region->cpp;
+ uint32_t pitch = region->pitch * cpp;
+ uint32_t tiling = region->tiling;
+
+ if (map_stencil_as_y_tiled) {
+ tiling = I915_TILING_Y;
+
+ /* When mapping a W-tiled stencil buffer as Y-tiled, each 64-high W-tile
+ * gets transformed into a 32-high Y-tile. Accordingly, the pitch of
+ * the resulting region is twice the pitch of the original region, since
+ * each row in the Y-tiled view corresponds to two rows in the actual
+ * W-tiled surface. So we need to correct the pitch before computing
+ * the offsets.
+ */
+ pitch *= 2;
+ }
+
+ switch (tiling) {
+ default:
+ assert(false);
+ case I915_TILING_NONE:
+ return y * pitch + x * cpp;
+ case I915_TILING_X:
+ assert((x % (512 / cpp)) == 0);
+ assert((y % 8) == 0);
+ return y * pitch + x / (512 / cpp) * 4096;
+ case I915_TILING_Y:
+ assert((x % (128 / cpp)) == 0);
+ assert((y % 32) == 0);
+ return y * pitch + x / (128 / cpp) * 4096;
+ }
+}
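+
+/* Worked example (illustrative): for an X-tiled region with cpp == 4, a
+ * 4096-byte tile spans 512 bytes (128 pixels) in x and 8 rows in y, so
+ * intel_region_get_aligned_offset(region, 256, 16, false) returns
+ * 16 * pitch + (256 / 128) * 4096: sixteen rows down, plus two whole
+ * 4-KB tile columns across.
+ */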
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_resolve_map.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_resolve_map.c
index 77e50fbaea4..04b5c942432 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_resolve_map.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_resolve_map.c
@@ -1 +1,111 @@
-../intel/intel_resolve_map.c \ No newline at end of file
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "intel_resolve_map.h"
+
+#include <assert.h>
+#include <stdlib.h>
+
+/**
+ * \brief Set that the miptree slice at (level, layer) needs a resolve.
+ *
+ * If a map element already exists with the given key, then the value is
+ * changed to the given value of \c need.
+ */
+void
+intel_resolve_map_set(struct intel_resolve_map *head,
+ uint32_t level,
+ uint32_t layer,
+ enum gen6_hiz_op need)
+{
+ struct intel_resolve_map **tail = &head->next;
+ struct intel_resolve_map *prev = head;
+
+ while (*tail) {
+ if ((*tail)->level == level && (*tail)->layer == layer) {
+ (*tail)->need = need;
+ return;
+ }
+ prev = *tail;
+ tail = &(*tail)->next;
+ }
+
+ *tail = malloc(sizeof(**tail));
+ (*tail)->prev = prev;
+ (*tail)->next = NULL;
+ (*tail)->level = level;
+ (*tail)->layer = layer;
+ (*tail)->need = need;
+}
+
+/**
+ * \brief Get an element from the map.
+ * \return null if element is not contained in map.
+ */
+struct intel_resolve_map*
+intel_resolve_map_get(struct intel_resolve_map *head,
+ uint32_t level,
+ uint32_t layer)
+{
+ struct intel_resolve_map *item = head->next;
+
+ while (item) {
+ if (item->level == level && item->layer == layer)
+ break;
+ else
+ item = item->next;
+ }
+
+ return item;
+}
+
+/**
+ * \brief Remove and free an element from the map.
+ */
+void
+intel_resolve_map_remove(struct intel_resolve_map *elem)
+{
+ if (elem->prev)
+ elem->prev->next = elem->next;
+ if (elem->next)
+ elem->next->prev = elem->prev;
+ free(elem);
+}
+
+/**
+ * \brief Remove and free all elements of the map.
+ */
+void
+intel_resolve_map_clear(struct intel_resolve_map *head)
+{
+ struct intel_resolve_map *next = head->next;
+ struct intel_resolve_map *trash;
+
+ while (next) {
+ trash = next;
+ next = next->next;
+ free(trash);
+ }
+
+ head->next = NULL;
+}
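+
+/* Usage sketch (hypothetical caller; the real users are the miptree and HiZ
+ * code).  The head element is a dummy node that owns the list:
+ *
+ *    struct intel_resolve_map head = { 0 };
+ *    intel_resolve_map_set(&head, 0, 0, GEN6_HIZ_OP_HIZ_RESOLVE);
+ *    struct intel_resolve_map *e = intel_resolve_map_get(&head, 0, 0);
+ *    if (e)
+ *       intel_resolve_map_remove(e);
+ *    intel_resolve_map_clear(&head);
+ */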
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_screen.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_screen.c
index f2db48272b9..00a1b0122ec 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_screen.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_screen.c
@@ -1 +1,1204 @@
-../intel/intel_screen.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <errno.h>
+#include <time.h>
+#include "main/glheader.h"
+#include "main/context.h"
+#include "main/framebuffer.h"
+#include "main/renderbuffer.h"
+#include "main/hash.h"
+#include "main/fbobject.h"
+#include "main/mfeatures.h"
+#include "main/version.h"
+#include "swrast/s_renderbuffer.h"
+
+#include "utils.h"
+#include "xmlpool.h"
+
+PUBLIC const char __driConfigOptions[] =
+ DRI_CONF_BEGIN
+ DRI_CONF_SECTION_PERFORMANCE
+ DRI_CONF_VBLANK_MODE(DRI_CONF_VBLANK_ALWAYS_SYNC)
+ /* Options correspond to DRI_CONF_BO_REUSE_DISABLED,
+ * DRI_CONF_BO_REUSE_ALL
+ */
+ DRI_CONF_OPT_BEGIN_V(bo_reuse, enum, 1, "0:1")
+ DRI_CONF_DESC_BEGIN(en, "Buffer object reuse")
+ DRI_CONF_ENUM(0, "Disable buffer object reuse")
+ DRI_CONF_ENUM(1, "Enable reuse of all sizes of buffer objects")
+ DRI_CONF_DESC_END
+ DRI_CONF_OPT_END
+
+ DRI_CONF_OPT_BEGIN(texture_tiling, bool, true)
+ DRI_CONF_DESC(en, "Enable texture tiling")
+ DRI_CONF_OPT_END
+
+ DRI_CONF_OPT_BEGIN(hiz, bool, true)
+ DRI_CONF_DESC(en, "Enable Hierarchical Z on gen6+")
+ DRI_CONF_OPT_END
+
+ DRI_CONF_OPT_BEGIN(early_z, bool, false)
+ DRI_CONF_DESC(en, "Enable early Z in classic mode (unstable, 945-only).")
+ DRI_CONF_OPT_END
+
+ DRI_CONF_OPT_BEGIN(fragment_shader, bool, true)
+ DRI_CONF_DESC(en, "Enable limited ARB_fragment_shader support on 915/945.")
+ DRI_CONF_OPT_END
+
+ DRI_CONF_SECTION_END
+ DRI_CONF_SECTION_QUALITY
+ DRI_CONF_FORCE_S3TC_ENABLE(false)
+ DRI_CONF_ALLOW_LARGE_TEXTURES(2)
+ DRI_CONF_SECTION_END
+ DRI_CONF_SECTION_DEBUG
+ DRI_CONF_NO_RAST(false)
+ DRI_CONF_ALWAYS_FLUSH_BATCH(false)
+ DRI_CONF_ALWAYS_FLUSH_CACHE(false)
+ DRI_CONF_FORCE_GLSL_EXTENSIONS_WARN(false)
+ DRI_CONF_DISABLE_BLEND_FUNC_EXTENDED(false)
+
+ DRI_CONF_OPT_BEGIN(stub_occlusion_query, bool, false)
+ DRI_CONF_DESC(en, "Enable stub ARB_occlusion_query support on 915/945.")
+ DRI_CONF_OPT_END
+
+ DRI_CONF_OPT_BEGIN(shader_precompile, bool, true)
+ DRI_CONF_DESC(en, "Perform code generation at shader link time.")
+ DRI_CONF_OPT_END
+ DRI_CONF_SECTION_END
+DRI_CONF_END;
+
+const GLuint __driNConfigOptions = 15;
+
+#include "intel_batchbuffer.h"
+#include "intel_buffers.h"
+#include "intel_bufmgr.h"
+#include "intel_chipset.h"
+#include "intel_fbo.h"
+#include "intel_mipmap_tree.h"
+#include "intel_screen.h"
+#include "intel_tex.h"
+#include "intel_regions.h"
+
+#include "i915_drm.h"
+
+#ifdef USE_NEW_INTERFACE
+static PFNGLXCREATECONTEXTMODES create_context_modes = NULL;
+#endif /*USE_NEW_INTERFACE */
+
+/**
+ * For debugging purposes, this returns a time in seconds.
+ */
+double
+get_time(void)
+{
+ struct timespec tp;
+
+ clock_gettime(CLOCK_MONOTONIC, &tp);
+
+ return tp.tv_sec + tp.tv_nsec / 1000000000.0;
+}
+
+void
+aub_dump_bmp(struct gl_context *ctx)
+{
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
+
+ for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
+ struct intel_renderbuffer *irb =
+ intel_renderbuffer(fb->_ColorDrawBuffers[i]);
+
+ if (irb && irb->mt) {
+ enum aub_dump_bmp_format format;
+
+ switch (irb->Base.Base.Format) {
+ case MESA_FORMAT_ARGB8888:
+ case MESA_FORMAT_XRGB8888:
+ format = AUB_DUMP_BMP_FORMAT_ARGB_8888;
+ break;
+ default:
+ continue;
+ }
+
+ drm_intel_gem_bo_aub_dump_bmp(irb->mt->region->bo,
+ irb->draw_x,
+ irb->draw_y,
+ irb->Base.Base.Width,
+ irb->Base.Base.Height,
+ format,
+ irb->mt->region->pitch *
+ irb->mt->region->cpp,
+ 0);
+ }
+ }
+}
+
+static const __DRItexBufferExtension intelTexBufferExtension = {
+ { __DRI_TEX_BUFFER, __DRI_TEX_BUFFER_VERSION },
+ intelSetTexBuffer,
+ intelSetTexBuffer2,
+};
+
+static void
+intelDRI2Flush(__DRIdrawable *drawable)
+{
+ GET_CURRENT_CONTEXT(ctx);
+ struct intel_context *intel = intel_context(ctx);
+ if (intel == NULL)
+ return;
+
+ if (intel->gen < 4)
+ INTEL_FIREVERTICES(intel);
+
+ intel_downsample_for_dri2_flush(intel, drawable);
+ intel->need_throttle = true;
+
+ if (intel->batch.used)
+ intel_batchbuffer_flush(intel);
+
+ if (INTEL_DEBUG & DEBUG_AUB) {
+ aub_dump_bmp(ctx);
+ }
+}
+
+static const struct __DRI2flushExtensionRec intelFlushExtension = {
+ { __DRI2_FLUSH, __DRI2_FLUSH_VERSION },
+ intelDRI2Flush,
+ dri2InvalidateDrawable,
+};
+
+struct intel_image_format intel_image_formats[] = {
+ { __DRI_IMAGE_FOURCC_ARGB8888, __DRI_IMAGE_COMPONENTS_RGBA, 1,
+ { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } },
+
+ { __DRI_IMAGE_FOURCC_XRGB8888, __DRI_IMAGE_COMPONENTS_RGB, 1,
+ { { 0, 0, 0, __DRI_IMAGE_FORMAT_XRGB8888, 4 }, } },
+
+ { __DRI_IMAGE_FOURCC_YUV410, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
+ { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
+ { 1, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 },
+ { 2, 2, 2, __DRI_IMAGE_FORMAT_R8, 1 } } },
+
+ { __DRI_IMAGE_FOURCC_YUV411, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
+ { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
+ { 1, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 },
+ { 2, 2, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },
+
+ { __DRI_IMAGE_FOURCC_YUV420, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
+ { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
+ { 1, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 },
+ { 2, 1, 1, __DRI_IMAGE_FORMAT_R8, 1 } } },
+
+ { __DRI_IMAGE_FOURCC_YUV422, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
+ { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
+ { 1, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 },
+ { 2, 1, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },
+
+ { __DRI_IMAGE_FOURCC_YUV444, __DRI_IMAGE_COMPONENTS_Y_U_V, 3,
+ { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
+ { 1, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
+ { 2, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 } } },
+
+ { __DRI_IMAGE_FOURCC_NV12, __DRI_IMAGE_COMPONENTS_Y_UV, 2,
+ { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
+ { 1, 1, 1, __DRI_IMAGE_FORMAT_GR88, 2 } } },
+
+ { __DRI_IMAGE_FOURCC_NV16, __DRI_IMAGE_COMPONENTS_Y_UV, 2,
+ { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8, 1 },
+ { 1, 1, 0, __DRI_IMAGE_FORMAT_GR88, 2 } } },
+
+ /* For YUYV buffers, we set up two overlapping DRI images and treat
+ * them as planar buffers in the compositors. Plane 0 is GR88 and
+ * samples YU or YV pairs and places Y into the R component, while
+ * plane 1 is ARGB and samples whole YUYV clusters, placing U into
+ * the G component and V into A.  This lets the
+ * texture sampler interpolate the Y components correctly when
+ * sampling from plane 0, and interpolate U and V correctly when
+ * sampling from plane 1. */
+ { __DRI_IMAGE_FOURCC_YUYV, __DRI_IMAGE_COMPONENTS_Y_XUXV, 2,
+ { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88, 2 },
+ { 0, 1, 0, __DRI_IMAGE_FORMAT_ARGB8888, 4 } } }
+};
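+
+/* Example (illustrative): for an NV12 image above, plane 0 is a
+ * full-resolution R8 view of the Y samples and plane 1 a half-resolution
+ * GR88 view of the interleaved UV pairs, so intel_from_planar(parent, 1, ...)
+ * further down returns an image whose width and height are shifted right by
+ * the plane's 1,1 width/height shifts.
+ */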
+
+static __DRIimage *
+intel_allocate_image(int dri_format, void *loaderPrivate)
+{
+ __DRIimage *image;
+
+ image = CALLOC(sizeof *image);
+ if (image == NULL)
+ return NULL;
+
+ image->dri_format = dri_format;
+ image->offset = 0;
+
+ switch (dri_format) {
+ case __DRI_IMAGE_FORMAT_RGB565:
+ image->format = MESA_FORMAT_RGB565;
+ break;
+ case __DRI_IMAGE_FORMAT_XRGB8888:
+ image->format = MESA_FORMAT_XRGB8888;
+ break;
+ case __DRI_IMAGE_FORMAT_ARGB8888:
+ image->format = MESA_FORMAT_ARGB8888;
+ break;
+ case __DRI_IMAGE_FORMAT_ABGR8888:
+ image->format = MESA_FORMAT_RGBA8888_REV;
+ break;
+ case __DRI_IMAGE_FORMAT_XBGR8888:
+ image->format = MESA_FORMAT_RGBX8888_REV;
+ break;
+ case __DRI_IMAGE_FORMAT_R8:
+ image->format = MESA_FORMAT_R8;
+ break;
+ case __DRI_IMAGE_FORMAT_GR88:
+ image->format = MESA_FORMAT_GR88;
+ break;
+ case __DRI_IMAGE_FORMAT_NONE:
+ image->format = MESA_FORMAT_NONE;
+ break;
+ default:
+ free(image);
+ return NULL;
+ }
+
+ image->internal_format = _mesa_get_format_base_format(image->format);
+ image->data = loaderPrivate;
+
+ return image;
+}
+
+static __DRIimage *
+intel_create_image_from_name(__DRIscreen *screen,
+ int width, int height, int format,
+ int name, int pitch, void *loaderPrivate)
+{
+ struct intel_screen *intelScreen = screen->driverPrivate;
+ __DRIimage *image;
+ int cpp;
+
+ image = intel_allocate_image(format, loaderPrivate);
+ if (image->format == MESA_FORMAT_NONE)
+ cpp = 1;
+ else
+ cpp = _mesa_get_format_bytes(image->format);
+ image->region = intel_region_alloc_for_handle(intelScreen,
+ cpp, width, height,
+ pitch, name, "image");
+ if (image->region == NULL) {
+ FREE(image);
+ return NULL;
+ }
+
+ return image;
+}
+
+static __DRIimage *
+intel_create_image_from_renderbuffer(__DRIcontext *context,
+ int renderbuffer, void *loaderPrivate)
+{
+ __DRIimage *image;
+ struct intel_context *intel = context->driverPrivate;
+ struct gl_renderbuffer *rb;
+ struct intel_renderbuffer *irb;
+
+ rb = _mesa_lookup_renderbuffer(&intel->ctx, renderbuffer);
+ if (!rb) {
+ _mesa_error(&intel->ctx,
+ GL_INVALID_OPERATION, "glRenderbufferExternalMESA");
+ return NULL;
+ }
+
+ irb = intel_renderbuffer(rb);
+ image = CALLOC(sizeof *image);
+ if (image == NULL)
+ return NULL;
+
+ image->internal_format = rb->InternalFormat;
+ image->format = rb->Format;
+ image->offset = 0;
+ image->data = loaderPrivate;
+ intel_region_reference(&image->region, irb->mt->region);
+
+ switch (image->format) {
+ case MESA_FORMAT_RGB565:
+ image->dri_format = __DRI_IMAGE_FORMAT_RGB565;
+ break;
+ case MESA_FORMAT_XRGB8888:
+ image->dri_format = __DRI_IMAGE_FORMAT_XRGB8888;
+ break;
+ case MESA_FORMAT_ARGB8888:
+ image->dri_format = __DRI_IMAGE_FORMAT_ARGB8888;
+ break;
+ case MESA_FORMAT_RGBA8888_REV:
+ image->dri_format = __DRI_IMAGE_FORMAT_ABGR8888;
+ break;
+ case MESA_FORMAT_R8:
+ image->dri_format = __DRI_IMAGE_FORMAT_R8;
+ break;
+ case MESA_FORMAT_RG88:
+ image->dri_format = __DRI_IMAGE_FORMAT_GR88;
+ break;
+ }
+
+ return image;
+}
+
+static void
+intel_destroy_image(__DRIimage *image)
+{
+ intel_region_release(&image->region);
+ FREE(image);
+}
+
+static __DRIimage *
+intel_create_image(__DRIscreen *screen,
+ int width, int height, int format,
+ unsigned int use,
+ void *loaderPrivate)
+{
+ __DRIimage *image;
+ struct intel_screen *intelScreen = screen->driverPrivate;
+ uint32_t tiling;
+ int cpp;
+
+ tiling = I915_TILING_X;
+ if (use & __DRI_IMAGE_USE_CURSOR) {
+ if (width != 64 || height != 64)
+ return NULL;
+ tiling = I915_TILING_NONE;
+ }
+
+ image = intel_allocate_image(format, loaderPrivate);
+ cpp = _mesa_get_format_bytes(image->format);
+ image->region =
+ intel_region_alloc(intelScreen, tiling, cpp, width, height, true);
+ if (image->region == NULL) {
+ FREE(image);
+ return NULL;
+ }
+
+ return image;
+}
+
+static GLboolean
+intel_query_image(__DRIimage *image, int attrib, int *value)
+{
+ switch (attrib) {
+ case __DRI_IMAGE_ATTRIB_STRIDE:
+ *value = image->region->pitch * image->region->cpp;
+ return true;
+ case __DRI_IMAGE_ATTRIB_HANDLE:
+ *value = image->region->bo->handle;
+ return true;
+ case __DRI_IMAGE_ATTRIB_NAME:
+ return intel_region_flink(image->region, (uint32_t *) value);
+ case __DRI_IMAGE_ATTRIB_FORMAT:
+ *value = image->dri_format;
+ return true;
+ case __DRI_IMAGE_ATTRIB_WIDTH:
+ *value = image->region->width;
+ return true;
+ case __DRI_IMAGE_ATTRIB_HEIGHT:
+ *value = image->region->height;
+ return true;
+ case __DRI_IMAGE_ATTRIB_COMPONENTS:
+ if (image->planar_format == NULL)
+ return false;
+ *value = image->planar_format->components;
+ return true;
+ default:
+ return false;
+ }
+}
+
+static __DRIimage *
+intel_dup_image(__DRIimage *orig_image, void *loaderPrivate)
+{
+ __DRIimage *image;
+
+ image = CALLOC(sizeof *image);
+ if (image == NULL)
+ return NULL;
+
+ intel_region_reference(&image->region, orig_image->region);
+ if (image->region == NULL) {
+ FREE(image);
+ return NULL;
+ }
+
+ image->internal_format = orig_image->internal_format;
+ image->planar_format = orig_image->planar_format;
+ image->dri_format = orig_image->dri_format;
+ image->format = orig_image->format;
+ image->offset = orig_image->offset;
+ image->data = loaderPrivate;
+
+ memcpy(image->strides, orig_image->strides, sizeof(image->strides));
+ memcpy(image->offsets, orig_image->offsets, sizeof(image->offsets));
+
+ return image;
+}
+
+static GLboolean
+intel_validate_usage(__DRIimage *image, unsigned int use)
+{
+ if (use & __DRI_IMAGE_USE_CURSOR) {
+ if (image->region->width != 64 || image->region->height != 64)
+ return GL_FALSE;
+ }
+
+ return GL_TRUE;
+}
+
+static __DRIimage *
+intel_create_image_from_names(__DRIscreen *screen,
+ int width, int height, int fourcc,
+ int *names, int num_names,
+ int *strides, int *offsets,
+ void *loaderPrivate)
+{
+ struct intel_image_format *f = NULL;
+ __DRIimage *image;
+ int i, index;
+
+ if (screen == NULL || names == NULL || num_names != 1)
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(intel_image_formats); i++) {
+ if (intel_image_formats[i].fourcc == fourcc) {
+ f = &intel_image_formats[i];
+ }
+ }
+
+ if (f == NULL)
+ return NULL;
+
+ image = intel_create_image_from_name(screen, width, height,
+ __DRI_IMAGE_FORMAT_NONE,
+ names[0], strides[0],
+ loaderPrivate);
+
+ if (image == NULL)
+ return NULL;
+
+ image->planar_format = f;
+ for (i = 0; i < f->nplanes; i++) {
+ index = f->planes[i].buffer_index;
+ image->offsets[index] = offsets[index];
+ image->strides[index] = strides[index];
+ }
+
+ return image;
+}
+
+static __DRIimage *
+intel_from_planar(__DRIimage *parent, int plane, void *loaderPrivate)
+{
+ int width, height, offset, stride, dri_format, cpp, index, pitch;
+ struct intel_image_format *f;
+ uint32_t mask_x, mask_y;
+ __DRIimage *image;
+
+ if (parent == NULL || parent->planar_format == NULL)
+ return NULL;
+
+ f = parent->planar_format;
+
+ if (plane >= f->nplanes)
+ return NULL;
+
+ width = parent->region->width >> f->planes[plane].width_shift;
+ height = parent->region->height >> f->planes[plane].height_shift;
+ dri_format = f->planes[plane].dri_format;
+ index = f->planes[plane].buffer_index;
+ offset = parent->offsets[index];
+ stride = parent->strides[index];
+
+ image = intel_allocate_image(dri_format, loaderPrivate);
+   cpp = _mesa_get_format_bytes(image->format); /* safe: never MESA_FORMAT_NONE here */
+ pitch = stride / cpp;
+ if (offset + height * cpp * pitch > parent->region->bo->size) {
+ _mesa_warning(NULL, "intel_create_sub_image: subimage out of bounds");
+ FREE(image);
+ return NULL;
+ }
+
+ image->region = calloc(sizeof(*image->region), 1);
+ if (image->region == NULL) {
+ FREE(image);
+ return NULL;
+ }
+
+ image->region->cpp = _mesa_get_format_bytes(image->format);
+ image->region->width = width;
+ image->region->height = height;
+ image->region->pitch = pitch;
+ image->region->refcount = 1;
+ image->region->bo = parent->region->bo;
+ drm_intel_bo_reference(image->region->bo);
+ image->region->tiling = parent->region->tiling;
+ image->region->screen = parent->region->screen;
+ image->offset = offset;
+
+ intel_region_get_tile_masks(image->region, &mask_x, &mask_y, false);
+ if (offset & mask_x)
+ _mesa_warning(NULL,
+ "intel_create_sub_image: offset not on tile boundary");
+
+ return image;
+}
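+
+/* Worked example (illustrative): for a 256x256 NV12 image, plane 0 is the
+ * full-size Y plane and plane 1 the half-size interleaved UV plane, so
+ * intel_from_planar(parent, 1, loaderPrivate) yields a 128x128 image
+ * (width_shift == height_shift == 1) whose offset and stride come from the
+ * per-plane tables filled in by intel_create_image_from_names() above.
+ */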
+
+static struct __DRIimageExtensionRec intelImageExtension = {
+ { __DRI_IMAGE, 5 },
+ intel_create_image_from_name,
+ intel_create_image_from_renderbuffer,
+ intel_destroy_image,
+ intel_create_image,
+ intel_query_image,
+ intel_dup_image,
+ intel_validate_usage,
+ intel_create_image_from_names,
+ intel_from_planar
+};
+
+static const __DRIextension *intelScreenExtensions[] = {
+ &intelTexBufferExtension.base,
+ &intelFlushExtension.base,
+ &intelImageExtension.base,
+ &dri2ConfigQueryExtension.base,
+ NULL
+};
+
+static bool
+intel_get_param(__DRIscreen *psp, int param, int *value)
+{
+ int ret;
+ struct drm_i915_getparam gp;
+
+ memset(&gp, 0, sizeof(gp));
+ gp.param = param;
+ gp.value = value;
+
+ ret = drmCommandWriteRead(psp->fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
+ if (ret) {
+ if (ret != -EINVAL)
+ _mesa_warning(NULL, "drm_i915_getparam: %d", ret);
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+intel_get_boolean(__DRIscreen *psp, int param)
+{
+ int value = 0;
+ return intel_get_param(psp, param, &value) && value;
+}
+
+static void
+nop_callback(GLuint key, void *data, void *userData)
+{
+}
+
+static void
+intelDestroyScreen(__DRIscreen * sPriv)
+{
+ struct intel_screen *intelScreen = sPriv->driverPrivate;
+
+ dri_bufmgr_destroy(intelScreen->bufmgr);
+ driDestroyOptionInfo(&intelScreen->optionCache);
+
+ /* Some regions may still have references to them at this point, so
+ * flush the hash table to prevent _mesa_DeleteHashTable() from
+ * complaining about the hash not being empty. */
+ _mesa_HashDeleteAll(intelScreen->named_regions, nop_callback, NULL);
+ _mesa_DeleteHashTable(intelScreen->named_regions);
+
+ FREE(intelScreen);
+ sPriv->driverPrivate = NULL;
+}
+
+
+/**
+ * This is called when we need to set up GL rendering to a new X window.
+ */
+static GLboolean
+intelCreateBuffer(__DRIscreen * driScrnPriv,
+ __DRIdrawable * driDrawPriv,
+ const struct gl_config * mesaVis, GLboolean isPixmap)
+{
+ struct intel_renderbuffer *rb;
+ struct intel_screen *screen = (struct intel_screen*) driScrnPriv->driverPrivate;
+ gl_format rgbFormat;
+ unsigned num_samples = intel_quantize_num_samples(screen, mesaVis->samples);
+ struct gl_framebuffer *fb;
+
+ if (isPixmap)
+ return false;
+
+ fb = CALLOC_STRUCT(gl_framebuffer);
+ if (!fb)
+ return false;
+
+ _mesa_initialize_window_framebuffer(fb, mesaVis);
+
+ if (mesaVis->redBits == 5)
+ rgbFormat = MESA_FORMAT_RGB565;
+ else if (mesaVis->alphaBits == 0)
+ rgbFormat = MESA_FORMAT_XRGB8888;
+ else
+ rgbFormat = MESA_FORMAT_ARGB8888;
+
+ /* setup the hardware-based renderbuffers */
+ rb = intel_create_renderbuffer(rgbFormat, num_samples);
+ _mesa_add_renderbuffer(fb, BUFFER_FRONT_LEFT, &rb->Base.Base);
+
+ if (mesaVis->doubleBufferMode) {
+ rb = intel_create_renderbuffer(rgbFormat, num_samples);
+ _mesa_add_renderbuffer(fb, BUFFER_BACK_LEFT, &rb->Base.Base);
+ }
+
+ /*
+ * Assert here that the gl_config has an expected depth/stencil bit
+ * combination: one of d24/s8, d16/s0, d0/s0. (See intelInitScreen2(),
+ * which constructs the advertised configs.)
+ */
+ if (mesaVis->depthBits == 24) {
+ assert(mesaVis->stencilBits == 8);
+
+ if (screen->hw_has_separate_stencil) {
+ rb = intel_create_private_renderbuffer(MESA_FORMAT_X8_Z24,
+ num_samples);
+ _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
+ rb = intel_create_private_renderbuffer(MESA_FORMAT_S8,
+ num_samples);
+ _mesa_add_renderbuffer(fb, BUFFER_STENCIL, &rb->Base.Base);
+ } else {
+ /*
+ * Use combined depth/stencil. Note that the renderbuffer is
+ * attached to two attachment points.
+ */
+ rb = intel_create_private_renderbuffer(MESA_FORMAT_S8_Z24,
+ num_samples);
+ _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
+ _mesa_add_renderbuffer(fb, BUFFER_STENCIL, &rb->Base.Base);
+ }
+ }
+ else if (mesaVis->depthBits == 16) {
+ assert(mesaVis->stencilBits == 0);
+ rb = intel_create_private_renderbuffer(MESA_FORMAT_Z16,
+ num_samples);
+ _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
+ }
+ else {
+ assert(mesaVis->depthBits == 0);
+ assert(mesaVis->stencilBits == 0);
+ }
+
+ /* now add any/all software-based renderbuffers we may need */
+ _swrast_add_soft_renderbuffers(fb,
+ false, /* never sw color */
+ false, /* never sw depth */
+ false, /* never sw stencil */
+ mesaVis->accumRedBits > 0,
+ false, /* never sw alpha */
+ false /* never sw aux */ );
+ driDrawPriv->driverPrivate = fb;
+
+ return true;
+}
+
+static void
+intelDestroyBuffer(__DRIdrawable * driDrawPriv)
+{
+ struct gl_framebuffer *fb = driDrawPriv->driverPrivate;
+
+ _mesa_reference_framebuffer(&fb, NULL);
+}
+
+/* There are probably better ways to do this, such as an
+ * init-designated function to register chipids and createcontext
+ * functions.
+ */
+extern bool
+i830CreateContext(const struct gl_config *mesaVis,
+ __DRIcontext *driContextPriv,
+ void *sharedContextPrivate);
+
+extern bool
+i915CreateContext(int api,
+ const struct gl_config *mesaVis,
+ __DRIcontext *driContextPriv,
+ unsigned major_version,
+ unsigned minor_version,
+ unsigned *error,
+ void *sharedContextPrivate);
+extern bool
+brwCreateContext(int api,
+ const struct gl_config *mesaVis,
+ __DRIcontext *driContextPriv,
+ unsigned major_version,
+ unsigned minor_version,
+ uint32_t flags,
+ unsigned *error,
+ void *sharedContextPrivate);
+
+static GLboolean
+intelCreateContext(gl_api api,
+ const struct gl_config * mesaVis,
+ __DRIcontext * driContextPriv,
+ unsigned major_version,
+ unsigned minor_version,
+ uint32_t flags,
+ unsigned *error,
+ void *sharedContextPrivate)
+{
+ __DRIscreen *sPriv = driContextPriv->driScreenPriv;
+ struct intel_screen *intelScreen = sPriv->driverPrivate;
+ bool success = false;
+
+#ifdef I915
+ if (IS_9XX(intelScreen->deviceID)) {
+ success = i915CreateContext(api, mesaVis, driContextPriv,
+ major_version, minor_version, error,
+ sharedContextPrivate);
+ } else {
+ /* success starts false, so each accepted case must set it explicitly;
+ * otherwise the i830 path below would never run. */
+ switch (api) {
+ case API_OPENGL:
+ if (major_version > 1 || minor_version > 3) {
+ *error = __DRI_CTX_ERROR_BAD_VERSION;
+ success = false;
+ } else {
+ success = true;
+ }
+ break;
+ case API_OPENGLES:
+ success = true;
+ break;
+ default:
+ *error = __DRI_CTX_ERROR_BAD_API;
+ success = false;
+ }
+
+ if (success) {
+ intelScreen->no_vbo = true;
+ success = i830CreateContext(mesaVis, driContextPriv,
+ sharedContextPrivate);
+ if (!success)
+ *error = __DRI_CTX_ERROR_NO_MEMORY;
+ }
+ }
+#else
+ success = brwCreateContext(api, mesaVis,
+ driContextPriv,
+ major_version, minor_version, flags,
+ error, sharedContextPrivate);
+#endif
+
+ if (success)
+ return true;
+
+ if (driContextPriv->driverPrivate != NULL)
+ intelDestroyContext(driContextPriv);
+
+ return false;
+}
+
+static bool
+intel_init_bufmgr(struct intel_screen *intelScreen)
+{
+ __DRIscreen *spriv = intelScreen->driScrnPriv;
+ int num_fences = 0;
+
+ intelScreen->no_hw = getenv("INTEL_NO_HW") != NULL;
+
+ intelScreen->bufmgr = intel_bufmgr_gem_init(spriv->fd, BATCH_SZ);
+ if (intelScreen->bufmgr == NULL) {
+ fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
+ __func__, __LINE__);
+ return false;
+ }
+
+ if (!intel_get_param(spriv, I915_PARAM_NUM_FENCES_AVAIL, &num_fences) ||
+ num_fences == 0) {
+ fprintf(stderr, "[%s: %u] Kernel 2.6.29 required.\n", __func__, __LINE__);
+ return false;
+ }
+
+ drm_intel_bufmgr_gem_enable_fenced_relocs(intelScreen->bufmgr);
+
+ intelScreen->named_regions = _mesa_NewHashTable();
+
+ intelScreen->relaxed_relocations = 0;
+ intelScreen->relaxed_relocations |=
+ intel_get_boolean(spriv, I915_PARAM_HAS_RELAXED_DELTA) << 0;
+
+ return true;
+}
+
+/**
+ * Override intel_screen.hw_has_separate_stencil with environment variable
+ * INTEL_SEPARATE_STENCIL.
+ *
+ * Valid values for INTEL_SEPARATE_STENCIL are "0" and "1". If an invalid
+ * value is encountered, a warning is emitted and INTEL_SEPARATE_STENCIL
+ * is ignored.
+ */
+static void
+intel_override_separate_stencil(struct intel_screen *screen)
+{
+ const char *s = getenv("INTEL_SEPARATE_STENCIL");
+ if (!s) {
+ return;
+ } else if (!strncmp("0", s, 2)) {
+ screen->hw_has_separate_stencil = false;
+ } else if (!strncmp("1", s, 2)) {
+ screen->hw_has_separate_stencil = true;
+ } else {
+ fprintf(stderr,
+ "warning: env variable INTEL_SEPARATE_STENCIL=\"%s\" has "
+ "invalid value and is ignored\n", s);
+ }
+}
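+
+/* Example (illustrative): forcing the separate-stencil path off for a
+ * single run of an application:
+ *
+ *    $ INTEL_SEPARATE_STENCIL=0 glxgears
+ */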
+
+static bool
+intel_detect_swizzling(struct intel_screen *screen)
+{
+ drm_intel_bo *buffer;
+ unsigned long flags = 0;
+ unsigned long aligned_pitch;
+ uint32_t tiling = I915_TILING_X;
+ uint32_t swizzle_mode = 0;
+
+ buffer = drm_intel_bo_alloc_tiled(screen->bufmgr, "swizzle test",
+ 64, 64, 4,
+ &tiling, &aligned_pitch, flags);
+ if (buffer == NULL)
+ return false;
+
+ drm_intel_bo_get_tiling(buffer, &tiling, &swizzle_mode);
+ drm_intel_bo_unreference(buffer);
+
+ if (swizzle_mode == I915_BIT_6_SWIZZLE_NONE)
+ return false;
+ else
+ return true;
+}
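+
+/* Note (an assumption about the kernel interface, for illustration): on
+ * systems with interleaved memory channels drm_intel_bo_get_tiling() is
+ * expected to report a bit-6 swizzle mode such as I915_BIT_6_SWIZZLE_9_10;
+ * only I915_BIT_6_SWIZZLE_NONE means CPU access needs no address
+ * swizzling, which is what this probe distinguishes.
+ */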
+
+static __DRIconfig**
+intel_screen_make_configs(__DRIscreen *dri_screen)
+{
+ static const GLenum back_buffer_modes[] = {
+ GLX_NONE, GLX_SWAP_UNDEFINED_OML, GLX_SWAP_COPY_OML
+ };
+
+ static const uint8_t singlesample_samples[1] = {0};
+ static const uint8_t multisample_samples[2] = {4, 8};
+
+ struct intel_screen *screen = dri_screen->driverPrivate;
+ GLenum fb_format[3];
+ GLenum fb_type[3];
+ uint8_t depth_bits[4], stencil_bits[4];
+ __DRIconfig **configs = NULL;
+
+ fb_format[0] = GL_RGB;
+ fb_type[0] = GL_UNSIGNED_SHORT_5_6_5;
+
+ fb_format[1] = GL_BGR;
+ fb_type[1] = GL_UNSIGNED_INT_8_8_8_8_REV;
+
+ fb_format[2] = GL_BGRA;
+ fb_type[2] = GL_UNSIGNED_INT_8_8_8_8_REV;
+
+ /* Generate singlesample configs without accumulation buffer. */
+ for (int i = 0; i < ARRAY_SIZE(fb_format); i++) {
+ __DRIconfig **new_configs;
+ const int num_depth_stencil_bits = 2;
+
+ /* Starting with DRI2 protocol version 1.1 we can request a depth/stencil
+ * buffer that has a different number of bits per pixel than the color
+ * buffer. This isn't yet supported here.
+ */
+ depth_bits[0] = 0;
+ stencil_bits[0] = 0;
+
+ if (fb_type[i] == GL_UNSIGNED_SHORT_5_6_5) {
+ depth_bits[1] = 16;
+ stencil_bits[1] = 0;
+ } else {
+ depth_bits[1] = 24;
+ stencil_bits[1] = 8;
+ }
+
+ new_configs = driCreateConfigs(fb_format[i], fb_type[i],
+ depth_bits,
+ stencil_bits,
+ num_depth_stencil_bits,
+ back_buffer_modes,
+ ARRAY_SIZE(back_buffer_modes),
+ singlesample_samples, 1,
+ false);
+ configs = driConcatConfigs(configs, new_configs);
+ }
+
+ /* Generate the minimum possible set of configs that include an
+ * accumulation buffer.
+ */
+ for (int i = 0; i < ARRAY_SIZE(fb_format); i++) {
+ __DRIconfig **new_configs;
+
+ if (fb_type[i] == GL_UNSIGNED_SHORT_5_6_5) {
+ depth_bits[0] = 16;
+ stencil_bits[0] = 0;
+ } else {
+ depth_bits[0] = 24;
+ stencil_bits[0] = 8;
+ }
+
+ new_configs = driCreateConfigs(fb_format[i], fb_type[i],
+ depth_bits, stencil_bits, 1,
+ back_buffer_modes + 1, 1,
+ singlesample_samples, 1,
+ true);
+ configs = driConcatConfigs(configs, new_configs);
+ }
+
+ /* Generate multisample configs.
+ *
+ * This loop breaks early, and hence is a no-op, on gen < 6.
+ *
+ * Multisample configs must follow the singlesample configs in order to
+ * work around an X server bug present in 1.12. The X server chooses to
+ * associate the first listed RGBA888-Z24S8 config, regardless of its
+ * sample count, with the 32-bit depth visual used for compositing.
+ *
+ * Only doublebuffer configs with GLX_SWAP_UNDEFINED_OML behavior are
+ * supported. Singlebuffer configs are not supported because no one wants
+ * them. GLX_SWAP_COPY_OML is not supported due to page flipping.
+ */
+ for (int i = 0; i < ARRAY_SIZE(fb_format); i++) {
+ if (screen->gen < 6)
+ break;
+
+ __DRIconfig **new_configs;
+ const int num_depth_stencil_bits = 2;
+ int num_msaa_modes = 0;
+
+ depth_bits[0] = 0;
+ stencil_bits[0] = 0;
+
+ if (fb_type[i] == GL_UNSIGNED_SHORT_5_6_5) {
+ depth_bits[1] = 16;
+ stencil_bits[1] = 0;
+ } else {
+ depth_bits[1] = 24;
+ stencil_bits[1] = 8;
+ }
+
+ if (screen->gen >= 7)
+ num_msaa_modes = 2;
+ else if (screen->gen == 6)
+ num_msaa_modes = 1;
+
+ new_configs = driCreateConfigs(fb_format[i], fb_type[i],
+ depth_bits,
+ stencil_bits,
+ num_depth_stencil_bits,
+ back_buffer_modes + 1, 1,
+ multisample_samples,
+ num_msaa_modes,
+ false);
+ configs = driConcatConfigs(configs, new_configs);
+ }
+
+ if (configs == NULL) {
+ fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
+ __LINE__);
+ return NULL;
+ }
+
+ return configs;
+}
+
+/**
+ * This is the driver specific part of the createNewScreen entry point.
+ * Called when using DRI2.
+ *
+ * \return the struct gl_config supported by this driver
+ */
+static const
+__DRIconfig **intelInitScreen2(__DRIscreen *psp)
+{
+ struct intel_screen *intelScreen;
+ unsigned int api_mask;
+
+ if (psp->dri2.loader->base.version <= 2 ||
+ psp->dri2.loader->getBuffersWithFormat == NULL) {
+ fprintf(stderr,
+ "\nERROR! DRI2 loader with getBuffersWithFormat() "
+ "support required\n");
+ return NULL;
+ }
+
+ /* Allocate the private area */
+ intelScreen = CALLOC(sizeof *intelScreen);
+ if (!intelScreen) {
+ fprintf(stderr, "\nERROR! Allocating private area failed\n");
+ return NULL;
+ }
+ /* parse information in __driConfigOptions */
+ driParseOptionInfo(&intelScreen->optionCache,
+ __driConfigOptions, __driNConfigOptions);
+
+ intelScreen->driScrnPriv = psp;
+ psp->driverPrivate = (void *) intelScreen;
+
+ if (!intel_init_bufmgr(intelScreen))
+ return NULL;
+
+ intelScreen->deviceID = drm_intel_bufmgr_gem_get_devid(intelScreen->bufmgr);
+
+ intelScreen->kernel_has_gen7_sol_reset =
+ intel_get_boolean(intelScreen->driScrnPriv,
+ I915_PARAM_HAS_GEN7_SOL_RESET);
+
+ if (IS_GEN7(intelScreen->deviceID)) {
+ intelScreen->gen = 7;
+ } else if (IS_GEN6(intelScreen->deviceID)) {
+ intelScreen->gen = 6;
+ } else if (IS_GEN5(intelScreen->deviceID)) {
+ intelScreen->gen = 5;
+ } else if (IS_965(intelScreen->deviceID)) {
+ intelScreen->gen = 4;
+ } else if (IS_9XX(intelScreen->deviceID)) {
+ intelScreen->gen = 3;
+ } else {
+ intelScreen->gen = 2;
+ }
+
+ intelScreen->hw_has_separate_stencil = intelScreen->gen >= 6;
+ intelScreen->hw_must_use_separate_stencil = intelScreen->gen >= 7;
+
+ int has_llc = 0;
+ bool success = intel_get_param(intelScreen->driScrnPriv, I915_PARAM_HAS_LLC,
+ &has_llc);
+ if (success && has_llc)
+ intelScreen->hw_has_llc = true;
+ else if (!success && intelScreen->gen >= 6)
+ intelScreen->hw_has_llc = true;
+
+ intel_override_separate_stencil(intelScreen);
+
+ api_mask = (1 << __DRI_API_OPENGL);
+#if FEATURE_ES1
+ api_mask |= (1 << __DRI_API_GLES);
+#endif
+#if FEATURE_ES2
+ api_mask |= (1 << __DRI_API_GLES2);
+#endif
+
+ if (IS_9XX(intelScreen->deviceID) || IS_965(intelScreen->deviceID))
+ psp->api_mask = api_mask;
+
+ intelScreen->hw_has_swizzling = intel_detect_swizzling(intelScreen);
+
+ psp->extensions = intelScreenExtensions;
+
+ return (const __DRIconfig**) intel_screen_make_configs(psp);
+}
+
+struct intel_buffer {
+ __DRIbuffer base;
+ struct intel_region *region;
+};
+
+static __DRIbuffer *
+intelAllocateBuffer(__DRIscreen *screen,
+ unsigned attachment, unsigned format,
+ int width, int height)
+{
+ struct intel_buffer *intelBuffer;
+ struct intel_screen *intelScreen = screen->driverPrivate;
+
+ assert(attachment == __DRI_BUFFER_FRONT_LEFT ||
+ attachment == __DRI_BUFFER_BACK_LEFT);
+
+ intelBuffer = CALLOC(sizeof *intelBuffer);
+ if (intelBuffer == NULL)
+ return NULL;
+
+ /* The front and back buffers are color buffers, which are X tiled. */
+ intelBuffer->region = intel_region_alloc(intelScreen,
+ I915_TILING_X,
+ format / 8,
+ width,
+ height,
+ true);
+
+ if (intelBuffer->region == NULL) {
+ FREE(intelBuffer);
+ return NULL;
+ }
+
+ intel_region_flink(intelBuffer->region, &intelBuffer->base.name);
+
+ intelBuffer->base.attachment = attachment;
+ intelBuffer->base.cpp = intelBuffer->region->cpp;
+ intelBuffer->base.pitch =
+ intelBuffer->region->pitch * intelBuffer->region->cpp;
+
+ return &intelBuffer->base;
+}
+
+static void
+intelReleaseBuffer(__DRIscreen *screen, __DRIbuffer *buffer)
+{
+ struct intel_buffer *intelBuffer = (struct intel_buffer *) buffer;
+
+ intel_region_release(&intelBuffer->region);
+ free(intelBuffer);
+}
+
+
+const struct __DriverAPIRec driDriverAPI = {
+ .InitScreen = intelInitScreen2,
+ .DestroyScreen = intelDestroyScreen,
+ .CreateContext = intelCreateContext,
+ .DestroyContext = intelDestroyContext,
+ .CreateBuffer = intelCreateBuffer,
+ .DestroyBuffer = intelDestroyBuffer,
+ .MakeCurrent = intelMakeCurrent,
+ .UnbindContext = intelUnbindContext,
+ .AllocateBuffer = intelAllocateBuffer,
+ .ReleaseBuffer = intelReleaseBuffer
+};
+
+/* This is the table of extensions that the loader will dlsym() for. */
+PUBLIC const __DRIextension *__driDriverExtensions[] = {
+ &driCoreExtension.base,
+ &driDRI2Extension.base,
+ NULL
+};
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_span.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_span.c
index 05e5e8e5833..d7eaa41241e 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_span.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_span.c
@@ -1 +1,215 @@
-../intel/intel_span.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright 2011 Intel Corporation
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Chad Versace <chad@chad-versace.us>
+ *
+ **************************************************************************/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "main/glheader.h"
+#include "main/macros.h"
+#include "main/mtypes.h"
+#include "main/colormac.h"
+#include "main/renderbuffer.h"
+
+#include "intel_buffers.h"
+#include "intel_fbo.h"
+#include "intel_mipmap_tree.h"
+#include "intel_screen.h"
+#include "intel_span.h"
+#include "intel_regions.h"
+#include "intel_tex.h"
+
+#include "swrast/swrast.h"
+#include "swrast/s_renderbuffer.h"
+
+/**
+ * \brief Get pointer offset into stencil buffer.
+ *
+ * The stencil buffer is W tiled. Since the GTT is incapable of W fencing, we
+ * must decode the tile's layout in software.
+ *
+ * See
+ * - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.2.1 W-Major Tile
+ * Format.
+ * - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.3 Tiling Algorithm
+ *
+ * Even though the returned offset is always positive, the return type is
+ * signed due to
+ * commit e8b1c6d6f55f5be3bef25084fdd8b6127517e137
+ * mesa: Fix return type of _mesa_get_format_bytes() (#37351)
+ */
+intptr_t
+intel_offset_S8(uint32_t stride, uint32_t x, uint32_t y, bool swizzled)
+{
+ uint32_t tile_size = 4096;
+ uint32_t tile_width = 64;
+ uint32_t tile_height = 64;
+ uint32_t row_size = 64 * stride;
+
+ uint32_t tile_x = x / tile_width;
+ uint32_t tile_y = y / tile_height;
+
+ /* The byte's address relative to the tile's base address. */
+ uint32_t byte_x = x % tile_width;
+ uint32_t byte_y = y % tile_height;
+
+ uintptr_t u = tile_y * row_size
+ + tile_x * tile_size
+ + 512 * (byte_x / 8)
+ + 64 * (byte_y / 8)
+ + 32 * ((byte_y / 4) % 2)
+ + 16 * ((byte_x / 4) % 2)
+ + 8 * ((byte_y / 2) % 2)
+ + 4 * ((byte_x / 2) % 2)
+ + 2 * (byte_y % 2)
+ + 1 * (byte_x % 2);
+
+ if (swizzled) {
+ /* adjust for bit6 swizzling */
+ if (((byte_x / 8) % 2) == 1) {
+ if (((byte_y / 8) % 2) == 0) {
+ u += 64;
+ } else {
+ u -= 64;
+ }
+ }
+ }
+
+ return u;
+}
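+
+/* Worked example (illustrative): with stride = 128, (x, y) = (65, 2) and
+ * swizzled = false:
+ *
+ *    tile_x = 1, tile_y = 0, byte_x = 1, byte_y = 2
+ *    u = 1 * 4096 (tile_x * tile_size)
+ *      + 8 * ((2 / 2) % 2) (= 8)
+ *      + 1 * (1 % 2) (= 1)
+ *      = 4105; every other term is zero.
+ */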
+
+/**
+ * Map the regions needed by intelSpanRenderStart().
+ */
+static void
+intel_span_map_buffers(struct intel_context *intel)
+{
+ struct gl_context *ctx = &intel->ctx;
+ struct intel_texture_object *tex_obj;
+
+ for (int i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
+ if (!ctx->Texture.Unit[i]._ReallyEnabled)
+ continue;
+ tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
+ intel_finalize_mipmap_tree(intel, i);
+ intel_tex_map_images(intel, tex_obj,
+ GL_MAP_READ_BIT | GL_MAP_WRITE_BIT);
+ }
+
+ _swrast_map_renderbuffers(ctx);
+}
+
+/**
+ * Prepare for software rendering. Map current read/draw framebuffers'
+ * renderbuffers and all currently bound texture objects.
+ *
+ * Old note: Moved locking out to get reasonable span performance.
+ */
+void
+intelSpanRenderStart(struct gl_context * ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+
+ intel_flush(ctx);
+ intel_prepare_render(intel);
+ intel_flush(ctx);
+ intel_span_map_buffers(intel);
+}
+
+/**
+ * Called when done software rendering. Unmap the buffers we mapped in
+ * the above function.
+ */
+void
+intelSpanRenderFinish(struct gl_context * ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+ GLuint i;
+
+ _swrast_flush(ctx);
+
+ for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
+ if (ctx->Texture.Unit[i]._ReallyEnabled) {
+ struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
+ intel_tex_unmap_images(intel, intel_texture_object(texObj));
+ }
+ }
+
+ _swrast_unmap_renderbuffers(ctx);
+}
+
+
+void
+intelInitSpanFuncs(struct gl_context * ctx)
+{
+ struct swrast_device_driver *swdd = _swrast_GetDeviceDriverReference(ctx);
+ if (swdd) {
+ swdd->SpanRenderStart = intelSpanRenderStart;
+ swdd->SpanRenderFinish = intelSpanRenderFinish;
+ }
+}
+
+void
+intel_map_vertex_shader_textures(struct gl_context *ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+ int i;
+
+ if (ctx->VertexProgram._Current == NULL)
+ return;
+
+ for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
+ if (ctx->Texture.Unit[i]._ReallyEnabled &&
+ ctx->VertexProgram._Current->Base.TexturesUsed[i] != 0) {
+ struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
+
+ intel_tex_map_images(intel, intel_texture_object(texObj),
+ GL_MAP_READ_BIT | GL_MAP_WRITE_BIT);
+ }
+ }
+}
+
+void
+intel_unmap_vertex_shader_textures(struct gl_context *ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+ int i;
+
+ if (ctx->VertexProgram._Current == NULL)
+ return;
+
+ for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
+ if (ctx->Texture.Unit[i]._ReallyEnabled &&
+ ctx->VertexProgram._Current->Base.TexturesUsed[i] != 0) {
+ struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
+
+ intel_tex_unmap_images(intel, intel_texture_object(texObj));
+ }
+ }
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_state.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_state.c
index 519672fc359..6a817cdf3f6 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_state.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_state.c
@@ -1 +1,195 @@
-../intel/intel_state.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "main/glheader.h"
+#include "main/context.h"
+#include "main/macros.h"
+#include "main/enums.h"
+#include "main/colormac.h"
+#include "main/dd.h"
+
+#include "intel_screen.h"
+#include "intel_context.h"
+
+int
+intel_translate_shadow_compare_func(GLenum func)
+{
+ switch (func) {
+ case GL_NEVER:
+ return COMPAREFUNC_ALWAYS;
+ case GL_LESS:
+ return COMPAREFUNC_LEQUAL;
+ case GL_LEQUAL:
+ return COMPAREFUNC_LESS;
+ case GL_GREATER:
+ return COMPAREFUNC_GEQUAL;
+ case GL_GEQUAL:
+ return COMPAREFUNC_GREATER;
+ case GL_NOTEQUAL:
+ return COMPAREFUNC_EQUAL;
+ case GL_EQUAL:
+ return COMPAREFUNC_NOTEQUAL;
+ case GL_ALWAYS:
+ return COMPAREFUNC_NEVER;
+ }
+
+ fprintf(stderr, "Unknown value in %s: %x\n", __FUNCTION__, func);
+ return COMPAREFUNC_NEVER;
+}
+
+int
+intel_translate_compare_func(GLenum func)
+{
+ switch (func) {
+ case GL_NEVER:
+ return COMPAREFUNC_NEVER;
+ case GL_LESS:
+ return COMPAREFUNC_LESS;
+ case GL_LEQUAL:
+ return COMPAREFUNC_LEQUAL;
+ case GL_GREATER:
+ return COMPAREFUNC_GREATER;
+ case GL_GEQUAL:
+ return COMPAREFUNC_GEQUAL;
+ case GL_NOTEQUAL:
+ return COMPAREFUNC_NOTEQUAL;
+ case GL_EQUAL:
+ return COMPAREFUNC_EQUAL;
+ case GL_ALWAYS:
+ return COMPAREFUNC_ALWAYS;
+ }
+
+ fprintf(stderr, "Unknown value in %s: %x\n", __FUNCTION__, func);
+ return COMPAREFUNC_ALWAYS;
+}
+
+int
+intel_translate_stencil_op(GLenum op)
+{
+ switch (op) {
+ case GL_KEEP:
+ return STENCILOP_KEEP;
+ case GL_ZERO:
+ return STENCILOP_ZERO;
+ case GL_REPLACE:
+ return STENCILOP_REPLACE;
+ case GL_INCR:
+ return STENCILOP_INCRSAT;
+ case GL_DECR:
+ return STENCILOP_DECRSAT;
+ case GL_INCR_WRAP:
+ return STENCILOP_INCR;
+ case GL_DECR_WRAP:
+ return STENCILOP_DECR;
+ case GL_INVERT:
+ return STENCILOP_INVERT;
+ default:
+ return STENCILOP_ZERO;
+ }
+}
+
+int
+intel_translate_blend_factor(GLenum factor)
+{
+ switch (factor) {
+ case GL_ZERO:
+ return BLENDFACT_ZERO;
+ case GL_SRC_ALPHA:
+ return BLENDFACT_SRC_ALPHA;
+ case GL_ONE:
+ return BLENDFACT_ONE;
+ case GL_SRC_COLOR:
+ return BLENDFACT_SRC_COLR;
+ case GL_ONE_MINUS_SRC_COLOR:
+ return BLENDFACT_INV_SRC_COLR;
+ case GL_DST_COLOR:
+ return BLENDFACT_DST_COLR;
+ case GL_ONE_MINUS_DST_COLOR:
+ return BLENDFACT_INV_DST_COLR;
+ case GL_ONE_MINUS_SRC_ALPHA:
+ return BLENDFACT_INV_SRC_ALPHA;
+ case GL_DST_ALPHA:
+ return BLENDFACT_DST_ALPHA;
+ case GL_ONE_MINUS_DST_ALPHA:
+ return BLENDFACT_INV_DST_ALPHA;
+ case GL_SRC_ALPHA_SATURATE:
+ return BLENDFACT_SRC_ALPHA_SATURATE;
+ case GL_CONSTANT_COLOR:
+ return BLENDFACT_CONST_COLOR;
+ case GL_ONE_MINUS_CONSTANT_COLOR:
+ return BLENDFACT_INV_CONST_COLOR;
+ case GL_CONSTANT_ALPHA:
+ return BLENDFACT_CONST_ALPHA;
+ case GL_ONE_MINUS_CONSTANT_ALPHA:
+ return BLENDFACT_INV_CONST_ALPHA;
+ }
+
+ fprintf(stderr, "Unknown value in %s: %x\n", __FUNCTION__, factor);
+ return BLENDFACT_ZERO;
+}
+
+int
+intel_translate_logic_op(GLenum opcode)
+{
+ switch (opcode) {
+ case GL_CLEAR:
+ return LOGICOP_CLEAR;
+ case GL_AND:
+ return LOGICOP_AND;
+ case GL_AND_REVERSE:
+ return LOGICOP_AND_RVRSE;
+ case GL_COPY:
+ return LOGICOP_COPY;
+ case GL_COPY_INVERTED:
+ return LOGICOP_COPY_INV;
+ case GL_AND_INVERTED:
+ return LOGICOP_AND_INV;
+ case GL_NOOP:
+ return LOGICOP_NOOP;
+ case GL_XOR:
+ return LOGICOP_XOR;
+ case GL_OR:
+ return LOGICOP_OR;
+ case GL_OR_INVERTED:
+ return LOGICOP_OR_INV;
+ case GL_NOR:
+ return LOGICOP_NOR;
+ case GL_EQUIV:
+ return LOGICOP_EQUIV;
+ case GL_INVERT:
+ return LOGICOP_INV;
+ case GL_OR_REVERSE:
+ return LOGICOP_OR_RVRSE;
+ case GL_NAND:
+ return LOGICOP_NAND;
+ case GL_SET:
+ return LOGICOP_SET;
+ default:
+ return LOGICOP_SET;
+ }
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_syncobj.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_syncobj.c
index 0b2e56ab246..e965896df60 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_syncobj.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_syncobj.c
@@ -1 +1,132 @@
-../intel/intel_syncobj.c \ No newline at end of file
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/** @file intel_syncobj.c
+ *
+ * Support for ARB_sync
+ *
+ * ARB_sync is implemented by flushing the current batchbuffer and keeping a
+ * reference on it. We can then check for completion or wait for completion
+ * using the normal buffer object mechanisms. This does mean that if an
+ * application is using many sync objects, it will emit small batchbuffers
+ * which may end up being a significant overhead. In other tests of removing
+ * gratuitous batchbuffer syncs in Mesa, it hasn't appeared to be a significant
+ * performance bottleneck, though.
+ */
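+
+/* Typical client-side usage that exercises these hooks (illustrative; note
+ * the timeout is ignored here, see intel_client_wait_sync below):
+ *
+ *    GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
+ *    ...
+ *    glClientWaitSync(fence, GL_SYNC_FLUSH_COMMANDS_BIT, 1000000);
+ *    glDeleteSync(fence);
+ */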
+
+#include "main/simple_list.h"
+#include "main/imports.h"
+
+#include "intel_context.h"
+#include "intel_batchbuffer.h"
+#include "intel_reg.h"
+
+static struct gl_sync_object *
+intel_new_sync_object(struct gl_context *ctx, GLuint id)
+{
+ struct intel_sync_object *sync;
+
+ sync = calloc(1, sizeof(struct intel_sync_object));
+ if (!sync)
+ return NULL;
+
+ return &sync->Base;
+}
+
+static void
+intel_delete_sync_object(struct gl_context *ctx, struct gl_sync_object *s)
+{
+ struct intel_sync_object *sync = (struct intel_sync_object *)s;
+
+ drm_intel_bo_unreference(sync->bo);
+ free(sync);
+}
+
+static void
+intel_fence_sync(struct gl_context *ctx, struct gl_sync_object *s,
+ GLenum condition, GLbitfield flags)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_sync_object *sync = (struct intel_sync_object *)s;
+
+ assert(condition == GL_SYNC_GPU_COMMANDS_COMPLETE);
+ intel_batchbuffer_emit_mi_flush(intel);
+
+ sync->bo = intel->batch.bo;
+ drm_intel_bo_reference(sync->bo);
+
+ intel_flush(ctx);
+}
+
+/* We ignore the user-supplied timeout. This is weaselly -- we're allowed to
+ * round to an implementation-dependent accuracy, and right now our
+ * implementation "rounds" to the wait-forever value.
+ *
+ * The fix would be a new kernel function to do the GTT transition with a
+ * timeout.
+ */
+static void intel_client_wait_sync(struct gl_context *ctx, struct gl_sync_object *s,
+ GLbitfield flags, GLuint64 timeout)
+{
+ struct intel_sync_object *sync = (struct intel_sync_object *)s;
+
+ if (sync->bo) {
+ drm_intel_bo_wait_rendering(sync->bo);
+ s->StatusFlag = 1;
+ drm_intel_bo_unreference(sync->bo);
+ sync->bo = NULL;
+ }
+}
+
+/* We have nothing to do for WaitSync. Our GL command stream is sequential,
+ * so given that the sync object has already flushed the batchbuffer,
+ * any batchbuffers coming after this waitsync will naturally not occur until
+ * the previous one is done.
+ */
+static void intel_server_wait_sync(struct gl_context *ctx, struct gl_sync_object *s,
+ GLbitfield flags, GLuint64 timeout)
+{
+}
+
+static void intel_check_sync(struct gl_context *ctx, struct gl_sync_object *s)
+{
+ struct intel_sync_object *sync = (struct intel_sync_object *)s;
+
+ if (sync->bo && !drm_intel_bo_busy(sync->bo)) {
+ drm_intel_bo_unreference(sync->bo);
+ sync->bo = NULL;
+ s->StatusFlag = 1;
+ }
+}
+
+void intel_init_syncobj_functions(struct dd_function_table *functions)
+{
+ functions->NewSyncObject = intel_new_sync_object;
+ functions->DeleteSyncObject = intel_delete_sync_object;
+ functions->FenceSync = intel_fence_sync;
+ functions->CheckSync = intel_check_sync;
+ functions->ClientWaitSync = intel_client_wait_sync;
+ functions->ServerWaitSync = intel_server_wait_sync;
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex.c
index d77ce749a3e..5d938798d9f 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex.c
@@ -1 +1,222 @@
-../intel/intel_tex.c \ No newline at end of file
+#include "swrast/swrast.h"
+#include "main/renderbuffer.h"
+#include "main/texobj.h"
+#include "main/teximage.h"
+#include "main/mipmap.h"
+#include "drivers/common/meta.h"
+#include "intel_context.h"
+#include "intel_mipmap_tree.h"
+#include "intel_tex.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+static struct gl_texture_image *
+intelNewTextureImage(struct gl_context * ctx)
+{
+ DBG("%s\n", __FUNCTION__);
+ (void) ctx;
+ return (struct gl_texture_image *) CALLOC_STRUCT(intel_texture_image);
+}
+
+static void
+intelDeleteTextureImage(struct gl_context * ctx, struct gl_texture_image *img)
+{
+ /* nothing special (yet) for intel_texture_image */
+ _mesa_delete_texture_image(ctx, img);
+}
+
+
+static struct gl_texture_object *
+intelNewTextureObject(struct gl_context * ctx, GLuint name, GLenum target)
+{
+ struct intel_texture_object *obj = CALLOC_STRUCT(intel_texture_object);
+
+ (void) ctx;
+
+ DBG("%s\n", __FUNCTION__);
+ _mesa_initialize_texture_object(&obj->base, name, target);
+
+ return &obj->base;
+}
+
+static void
+intelDeleteTextureObject(struct gl_context *ctx,
+ struct gl_texture_object *texObj)
+{
+ struct intel_texture_object *intelObj = intel_texture_object(texObj);
+
+ intel_miptree_release(&intelObj->mt);
+ _mesa_delete_texture_object(ctx, texObj);
+}
+
+static GLboolean
+intel_alloc_texture_image_buffer(struct gl_context *ctx,
+ struct gl_texture_image *image)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_image *intel_image = intel_texture_image(image);
+ struct gl_texture_object *texobj = image->TexObject;
+ struct intel_texture_object *intel_texobj = intel_texture_object(texobj);
+ GLuint slices;
+
+ assert(image->Border == 0);
+
+ /* Because the driver uses AllocTextureImageBuffer() internally, it may end
+ * up mismatched with FreeTextureImageBuffer(), but that is safe to call
+ * multiple times.
+ */
+ ctx->Driver.FreeTextureImageBuffer(ctx, image);
+
+ /* Allocate the swrast_texture_image::ImageOffsets array now */
+ switch (texobj->Target) {
+ case GL_TEXTURE_3D:
+ case GL_TEXTURE_2D_ARRAY:
+ slices = image->Depth;
+ break;
+ case GL_TEXTURE_1D_ARRAY:
+ slices = image->Height;
+ break;
+ default:
+ slices = 1;
+ }
+ assert(!intel_image->base.ImageOffsets);
+ intel_image->base.ImageOffsets = malloc(slices * sizeof(GLuint));
+
+ _swrast_init_texture_image(image);
+
+ if (intel_texobj->mt &&
+ intel_miptree_match_image(intel_texobj->mt, image)) {
+ intel_miptree_reference(&intel_image->mt, intel_texobj->mt);
+ DBG("%s: alloc obj %p level %d %dx%dx%d using object's miptree %p\n",
+ __FUNCTION__, texobj, image->Level,
+ image->Width, image->Height, image->Depth, intel_texobj->mt);
+ } else {
+ intel_image->mt = intel_miptree_create_for_teximage(intel, intel_texobj,
+ intel_image,
+ false);
+
+ /* Even if the object currently has a mipmap tree associated
+ * with it, this one is a more likely candidate to represent the
+ * whole object since our level didn't fit what was there
+ * before, and any lower levels would fit into our miptree.
+ */
+ intel_miptree_reference(&intel_texobj->mt, intel_image->mt);
+
+ DBG("%s: alloc obj %p level %d %dx%dx%d using new miptree %p\n",
+ __FUNCTION__, texobj, image->Level,
+ image->Width, image->Height, image->Depth, intel_image->mt);
+ }
+
+ return true;
+}
+
+/**
+ * Called via ctx->Driver.AllocTextureStorage()
+ * Just have to allocate memory for the texture images.
+ */
+static GLboolean
+intel_alloc_texture_storage(struct gl_context *ctx,
+ struct gl_texture_object *texObj,
+ GLsizei levels, GLsizei width,
+ GLsizei height, GLsizei depth)
+{
+ const int numFaces = _mesa_num_tex_faces(texObj->Target);
+ int face;
+ int level;
+
+ for (face = 0; face < numFaces; face++) {
+ for (level = 0; level < levels; level++) {
+ struct gl_texture_image *const texImage = texObj->Image[face][level];
+ if (!intel_alloc_texture_image_buffer(ctx, texImage))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void
+intel_free_texture_image_buffer(struct gl_context * ctx,
+ struct gl_texture_image *texImage)
+{
+ struct intel_texture_image *intelImage = intel_texture_image(texImage);
+
+ DBG("%s\n", __FUNCTION__);
+
+ intel_miptree_release(&intelImage->mt);
+
+ if (intelImage->base.Buffer) {
+ _mesa_align_free(intelImage->base.Buffer);
+ intelImage->base.Buffer = NULL;
+ }
+
+ if (intelImage->base.ImageOffsets) {
+ free(intelImage->base.ImageOffsets);
+ intelImage->base.ImageOffsets = NULL;
+ }
+}
+
+/**
+ * Map texture memory/buffer into user space.
+ * Note: the region of interest parameters are ignored here.
+ * \param mode bitmask of GL_MAP_READ_BIT, GL_MAP_WRITE_BIT
+ * \param mapOut returns start of mapping of region of interest
+ * \param rowStrideOut returns row stride in bytes
+ */
+static void
+intel_map_texture_image(struct gl_context *ctx,
+ struct gl_texture_image *tex_image,
+ GLuint slice,
+ GLuint x, GLuint y, GLuint w, GLuint h,
+ GLbitfield mode,
+ GLubyte **map,
+ GLint *stride)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_image *intel_image = intel_texture_image(tex_image);
+ struct intel_mipmap_tree *mt = intel_image->mt;
+
+ /* Our texture data is always stored in a miptree. */
+ assert(mt);
+
+ /* Check that our caller wasn't confused about how to map a 1D texture. */
+ assert(tex_image->TexObject->Target != GL_TEXTURE_1D_ARRAY ||
+ h == 1);
+
+ /* intel_miptree_map operates on a unified "slice" number that references the
+ * cube face, since it's all just slices to the miptree code.
+ */
+ if (tex_image->TexObject->Target == GL_TEXTURE_CUBE_MAP)
+ slice = tex_image->Face;
+
+ intel_miptree_map(intel, mt, tex_image->Level, slice, x, y, w, h, mode,
+ (void **)map, stride);
+}
+
+static void
+intel_unmap_texture_image(struct gl_context *ctx,
+ struct gl_texture_image *tex_image, GLuint slice)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_image *intel_image = intel_texture_image(tex_image);
+ struct intel_mipmap_tree *mt = intel_image->mt;
+
+ if (tex_image->TexObject->Target == GL_TEXTURE_CUBE_MAP)
+ slice = tex_image->Face;
+
+ intel_miptree_unmap(intel, mt, tex_image->Level, slice);
+}
+
+void
+intelInitTextureFuncs(struct dd_function_table *functions)
+{
+ functions->NewTextureObject = intelNewTextureObject;
+ functions->NewTextureImage = intelNewTextureImage;
+ functions->DeleteTextureImage = intelDeleteTextureImage;
+ functions->DeleteTexture = intelDeleteTextureObject;
+ functions->AllocTextureImageBuffer = intel_alloc_texture_image_buffer;
+ functions->FreeTextureImageBuffer = intel_free_texture_image_buffer;
+ functions->AllocTextureStorage = intel_alloc_texture_storage;
+ functions->MapTextureImage = intel_map_texture_image;
+ functions->UnmapTextureImage = intel_unmap_texture_image;
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_copy.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_copy.c
index 87196c5d1ed..f4366330f88 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_copy.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_copy.c
@@ -1 +1,171 @@
-../intel/intel_tex_copy.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/mtypes.h"
+#include "main/enums.h"
+#include "main/image.h"
+#include "main/teximage.h"
+#include "main/texstate.h"
+#include "main/fbobject.h"
+
+#include "drivers/common/meta.h"
+
+#include "intel_screen.h"
+#include "intel_context.h"
+#include "intel_mipmap_tree.h"
+#include "intel_regions.h"
+#include "intel_fbo.h"
+#include "intel_tex.h"
+#include "intel_blit.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+
+bool
+intel_copy_texsubimage(struct intel_context *intel,
+ struct intel_texture_image *intelImage,
+ GLint dstx, GLint dsty,
+ struct intel_renderbuffer *irb,
+ GLint x, GLint y, GLsizei width, GLsizei height)
+{
+ struct gl_context *ctx = &intel->ctx;
+ struct intel_region *region;
+ const GLenum internalFormat = intelImage->base.Base.InternalFormat;
+ bool copy_supported = false;
+ bool copy_supported_with_alpha_override = false;
+
+ intel_prepare_render(intel);
+
+ if (!intelImage->mt || !irb || !irb->mt) {
+ if (unlikely(INTEL_DEBUG & DEBUG_PERF))
+ fprintf(stderr, "%s fail %p %p (0x%08x)\n",
+ __FUNCTION__, intelImage->mt, irb, internalFormat);
+ return false;
+ } else {
+ region = irb->mt->region;
+ assert(region);
+ }
+
+ copy_supported = intelImage->base.Base.TexFormat == intel_rb_format(irb);
+
+ /* Converting ARGB8888 to XRGB8888 is trivial: ignore the alpha bits */
+ if (intel_rb_format(irb) == MESA_FORMAT_ARGB8888 &&
+ intelImage->base.Base.TexFormat == MESA_FORMAT_XRGB8888) {
+ copy_supported = true;
+ }
+
+ /* Converting XRGB8888 to ARGB8888 requires setting the alpha bits to 1.0 */
+ if (intel_rb_format(irb) == MESA_FORMAT_XRGB8888 &&
+ intelImage->base.Base.TexFormat == MESA_FORMAT_ARGB8888) {
+ copy_supported_with_alpha_override = true;
+ }
+
+ if (!copy_supported && !copy_supported_with_alpha_override) {
+ if (unlikely(INTEL_DEBUG & DEBUG_PERF))
+ fprintf(stderr, "%s mismatched formats %s, %s\n",
+ __FUNCTION__,
+ _mesa_get_format_name(intelImage->base.Base.TexFormat),
+ _mesa_get_format_name(intel_rb_format(irb)));
+ return false;
+ }
+
+ {
+ GLuint image_x, image_y;
+ GLshort src_pitch;
+
+ /* get dest x/y in destination texture */
+ intel_miptree_get_image_offset(intelImage->mt,
+ intelImage->base.Base.Level,
+ intelImage->base.Base.Face,
+ 0,
+ &image_x, &image_y);
+
+ /* The blitter can't handle Y-tiled buffers. */
+ if (intelImage->mt->region->tiling == I915_TILING_Y) {
+ return false;
+ }
+
+ if (_mesa_is_winsys_fbo(ctx->ReadBuffer)) {
+ /* Flip vertical orientation for system framebuffers */
+ y = ctx->ReadBuffer->Height - (y + height);
+ src_pitch = -region->pitch;
+ } else {
+ /* reading from a FBO, y is already oriented the way we like */
+ src_pitch = region->pitch;
+ }
+
+ /* blit from src buffer to texture */
+ if (!intelEmitCopyBlit(intel,
+ intelImage->mt->cpp,
+ src_pitch,
+ region->bo,
+ 0,
+ region->tiling,
+ intelImage->mt->region->pitch,
+ intelImage->mt->region->bo,
+ 0,
+ intelImage->mt->region->tiling,
+ irb->draw_x + x, irb->draw_y + y,
+ image_x + dstx, image_y + dsty,
+ width, height,
+ GL_COPY)) {
+ return false;
+ }
+ }
+
+ if (copy_supported_with_alpha_override)
+ intel_set_teximage_alpha_to_one(ctx, intelImage);
+
+ return true;
+}
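+
+/* Illustrative GL entry point that lands here via intelCopyTexSubImage()
+ * below (sketch): copying the lower-left corner of the read buffer into
+ * the bound 2D texture:
+ *
+ *    glCopyTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 0, 0, width, height);
+ */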
+
+
+static void
+intelCopyTexSubImage(struct gl_context *ctx, GLuint dims,
+ struct gl_texture_image *texImage,
+ GLint xoffset, GLint yoffset, GLint zoffset,
+ struct gl_renderbuffer *rb,
+ GLint x, GLint y,
+ GLsizei width, GLsizei height)
+{
+ if (dims == 3 || !intel_copy_texsubimage(intel_context(ctx),
+ intel_texture_image(texImage),
+ xoffset, yoffset,
+ intel_renderbuffer(rb), x, y, width, height)) {
+ fallback_debug("%s - fallback to swrast\n", __FUNCTION__);
+ _mesa_meta_CopyTexSubImage(ctx, dims, texImage,
+ xoffset, yoffset, zoffset,
+ rb, x, y, width, height);
+ }
+}
+
+
+void
+intelInitTextureCopyImageFuncs(struct dd_function_table *functions)
+{
+ functions->CopyTexSubImage = intelCopyTexSubImage;
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_format.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_format.c
index 3415f754705..f53054d55d3 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_format.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_format.c
@@ -1 +1,65 @@
-../intel/intel_tex_format.c \ No newline at end of file
+#include "intel_context.h"
+#include "intel_tex.h"
+#include "main/enums.h"
+#include "main/formats.h"
+
+/**
+ * Returns the renderbuffer DataType for a MESA_FORMAT.
+ */
+GLenum
+intel_mesa_format_to_rb_datatype(gl_format format)
+{
+ switch (format) {
+ case MESA_FORMAT_ARGB8888:
+ case MESA_FORMAT_XRGB8888:
+ case MESA_FORMAT_SARGB8:
+ case MESA_FORMAT_R8:
+ case MESA_FORMAT_GR88:
+ case MESA_FORMAT_A8:
+ case MESA_FORMAT_I8:
+ case MESA_FORMAT_L8:
+ case MESA_FORMAT_AL88:
+ case MESA_FORMAT_RGB565:
+ case MESA_FORMAT_ARGB1555:
+ case MESA_FORMAT_ARGB4444:
+ case MESA_FORMAT_S8:
+ return GL_UNSIGNED_BYTE;
+ case MESA_FORMAT_R16:
+ case MESA_FORMAT_RG1616:
+ case MESA_FORMAT_Z16:
+ return GL_UNSIGNED_SHORT;
+ case MESA_FORMAT_X8_Z24:
+ return GL_UNSIGNED_INT;
+ case MESA_FORMAT_S8_Z24:
+ return GL_UNSIGNED_INT_24_8_EXT;
+ case MESA_FORMAT_RGBA_FLOAT32:
+ case MESA_FORMAT_RG_FLOAT32:
+ case MESA_FORMAT_R_FLOAT32:
+ case MESA_FORMAT_INTENSITY_FLOAT32:
+ case MESA_FORMAT_LUMINANCE_FLOAT32:
+ case MESA_FORMAT_ALPHA_FLOAT32:
+ case MESA_FORMAT_LUMINANCE_ALPHA_FLOAT32:
+ return GL_FLOAT;
+
+ /* The core depthstencil wrappers demand this. */
+ case MESA_FORMAT_Z32_FLOAT_X24S8:
+ return GL_FLOAT_32_UNSIGNED_INT_24_8_REV;
+
+ default:
+ /* Unsupported format. We may hit this when people ask for FBO-incomplete
+ * formats.
+ */
+ return GL_NONE;
+ }
+}
+
+int intel_compressed_num_bytes(GLuint mesaFormat)
+{
+ GLuint bw, bh;
+ GLuint block_size;
+
+ block_size = _mesa_get_format_bytes(mesaFormat);
+ _mesa_get_format_block_size(mesaFormat, &bw, &bh);
+
+ return block_size / bw;
+}
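+
+/* Worked example (illustrative): MESA_FORMAT_RGB_DXT1 has 8-byte blocks
+ * covering 4x4 texels, so this returns 8 / 4 = 2 bytes per texel column
+ * of a block row.
+ */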
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_image.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_image.c
index 567abe4974e..fe9040cf1b6 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_image.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_image.c
@@ -1 +1,360 @@
-../intel/intel_tex_image.c \ No newline at end of file
+
+#include "main/glheader.h"
+#include "main/macros.h"
+#include "main/mfeatures.h"
+#include "main/mtypes.h"
+#include "main/enums.h"
+#include "main/bufferobj.h"
+#include "main/context.h"
+#include "main/formats.h"
+#include "main/pbo.h"
+#include "main/renderbuffer.h"
+#include "main/texcompress.h"
+#include "main/texgetimage.h"
+#include "main/texobj.h"
+#include "main/teximage.h"
+#include "main/texstore.h"
+
+#include "intel_context.h"
+#include "intel_mipmap_tree.h"
+#include "intel_buffer_objects.h"
+#include "intel_batchbuffer.h"
+#include "intel_tex.h"
+#include "intel_blit.h"
+#include "intel_fbo.h"
+#include "intel_span.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+/* Work back from the specified level of the image to the baselevel and create a
+ * miptree of that size.
+ */
+struct intel_mipmap_tree *
+intel_miptree_create_for_teximage(struct intel_context *intel,
+ struct intel_texture_object *intelObj,
+ struct intel_texture_image *intelImage,
+ bool expect_accelerated_upload)
+{
+ GLuint firstLevel;
+ GLuint lastLevel;
+ int width, height, depth;
+ GLuint i;
+
+ intel_miptree_get_dimensions_for_image(&intelImage->base.Base,
+ &width, &height, &depth);
+
+ DBG("%s\n", __FUNCTION__);
+
+ if (intelImage->base.Base.Level > intelObj->base.BaseLevel &&
+ (width == 1 ||
+ (intelObj->base.Target != GL_TEXTURE_1D && height == 1) ||
+ (intelObj->base.Target == GL_TEXTURE_3D && depth == 1))) {
+ /* For this combination, we're at some lower mipmap level and
+ * some important dimension is 1. We can't extrapolate up to a
+ * likely base level width/height/depth for a full mipmap stack
+ * from this info, so just allocate this one level.
+ */
+ firstLevel = intelImage->base.Base.Level;
+ lastLevel = intelImage->base.Base.Level;
+ } else {
+ /* If this image disrespects BaseLevel, allocate from level zero.
+ * Usually BaseLevel == 0, so it's unlikely to happen.
+ */
+ if (intelImage->base.Base.Level < intelObj->base.BaseLevel)
+ firstLevel = 0;
+ else
+ firstLevel = intelObj->base.BaseLevel;
+
+ /* Figure out image dimensions at start level. */
+ for (i = intelImage->base.Base.Level; i > firstLevel; i--) {
+ width <<= 1;
+ if (height != 1)
+ height <<= 1;
+ if (depth != 1)
+ depth <<= 1;
+ }
+
+ /* Guess a reasonable value for lastLevel. This is probably going
+ * to be wrong fairly often and might mean that we have to look at
+ * resizable buffers, or require that buffers implement lazy
+ * pagetable arrangements.
+ */
+ if ((intelObj->base.Sampler.MinFilter == GL_NEAREST ||
+ intelObj->base.Sampler.MinFilter == GL_LINEAR) &&
+ intelImage->base.Base.Level == firstLevel &&
+ (intel->gen < 4 || firstLevel == 0)) {
+ lastLevel = firstLevel;
+ } else if (intelObj->base.Target == GL_TEXTURE_EXTERNAL_OES) {
+ lastLevel = firstLevel;
+ } else {
+ lastLevel = firstLevel + _mesa_logbase2(MAX2(MAX2(width, height), depth));
+ }
+ }
+
+ return intel_miptree_create(intel,
+ intelObj->base.Target,
+ intelImage->base.Base.TexFormat,
+ firstLevel,
+ lastLevel,
+ width,
+ height,
+ depth,
+ expect_accelerated_upload,
+ 0 /* num_samples */,
+ INTEL_MSAA_LAYOUT_NONE);
+}
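+
+/* Worked example (illustrative): uploading a 16x16 level-2 image of a
+ * mipmapped 2D texture with BaseLevel 0 doubles the dimensions twice in
+ * the loop above, giving a 64x64 base, and lastLevel = 0 + log2(64) = 6,
+ * so a full 64x64 .. 1x1 miptree is allocated up front.
+ */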
+
+/* There are actually quite a few combinations this will work for,
+ * more than what I've listed here.
+ */
+static bool
+check_pbo_format(GLenum format, GLenum type,
+ gl_format mesa_format)
+{
+ switch (mesa_format) {
+ case MESA_FORMAT_ARGB8888:
+ return (format == GL_BGRA && (type == GL_UNSIGNED_BYTE ||
+ type == GL_UNSIGNED_INT_8_8_8_8_REV));
+ case MESA_FORMAT_RGB565:
+ return (format == GL_RGB && type == GL_UNSIGNED_SHORT_5_6_5);
+ case MESA_FORMAT_L8:
+ return (format == GL_LUMINANCE && type == GL_UNSIGNED_BYTE);
+ case MESA_FORMAT_YCBCR:
+ return (type == GL_UNSIGNED_SHORT_8_8_MESA || type == GL_UNSIGNED_BYTE);
+ default:
+ return false;
+ }
+}
+
+
+/* XXX: Do this for TexSubImage also:
+ */
+static bool
+try_pbo_upload(struct gl_context *ctx,
+ struct gl_texture_image *image,
+ const struct gl_pixelstore_attrib *unpack,
+ GLenum format, GLenum type, const void *pixels)
+{
+ struct intel_texture_image *intelImage = intel_texture_image(image);
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *pbo = intel_buffer_object(unpack->BufferObj);
+ GLuint src_offset, src_stride;
+ GLuint dst_x, dst_y, dst_stride;
+ drm_intel_bo *dst_buffer, *src_buffer;
+
+ if (!_mesa_is_bufferobj(unpack->BufferObj))
+ return false;
+
+ DBG("trying pbo upload\n");
+
+ if (intel->ctx._ImageTransferState ||
+ unpack->SkipPixels || unpack->SkipRows) {
+ DBG("%s: image transfer\n", __FUNCTION__);
+ return false;
+ }
+
+ if (!check_pbo_format(format, type, intelImage->base.Base.TexFormat)) {
+ DBG("%s: format mismatch (upload to %s with format 0x%x, type 0x%x)\n",
+ __FUNCTION__, _mesa_get_format_name(intelImage->base.Base.TexFormat),
+ format, type);
+ return false;
+ }
+
+ ctx->Driver.AllocTextureImageBuffer(ctx, image);
+
+ if (!intelImage->mt) {
+ DBG("%s: no miptree\n", __FUNCTION__);
+ return false;
+ }
+
+ dst_buffer = intelImage->mt->region->bo;
+ src_buffer = intel_bufferobj_source(intel, pbo, 64, &src_offset);
+ /* note: potential 64-bit ptr to 32-bit int cast */
+ src_offset += (GLuint) (unsigned long) pixels;
+
+ if (unpack->RowLength > 0)
+ src_stride = unpack->RowLength;
+ else
+ src_stride = image->Width;
+
+ intel_miptree_get_image_offset(intelImage->mt, intelImage->base.Base.Level,
+ intelImage->base.Base.Face, 0,
+ &dst_x, &dst_y);
+
+ dst_stride = intelImage->mt->region->pitch;
+
+ if (!intelEmitCopyBlit(intel,
+ intelImage->mt->cpp,
+ src_stride, src_buffer,
+ src_offset, false,
+ dst_stride, dst_buffer, 0,
+ intelImage->mt->region->tiling,
+ 0, 0, dst_x, dst_y, image->Width, image->Height,
+ GL_COPY)) {
+ DBG("%s: blit failed\n", __FUNCTION__);
+ return false;
+ }
+
+ DBG("%s: success\n", __FUNCTION__);
+ return true;
+}
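+
+/* Illustrative GL usage that can take this blit path (a sketch; it assumes
+ * the texture format passes check_pbo_format() above):
+ *
+ *    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
+ *    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, w, h, 0,
+ *                 GL_RGB, GL_UNSIGNED_SHORT_5_6_5, (void *) 0);
+ */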
+
+static void
+intelTexImage(struct gl_context * ctx,
+ GLuint dims,
+ struct gl_texture_image *texImage,
+ GLenum format, GLenum type, const void *pixels,
+ const struct gl_pixelstore_attrib *unpack)
+{
+ DBG("%s target %s level %d %dx%dx%d\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr(texImage->TexObject->Target),
+ texImage->Level, texImage->Width, texImage->Height, texImage->Depth);
+
+ /* Attempt to use the blitter for PBO image uploads.
+ */
+ if (dims <= 2 &&
+ try_pbo_upload(ctx, texImage, unpack, format, type, pixels)) {
+ return;
+ }
+
+ DBG("%s: upload image %dx%dx%d pixels %p\n",
+ __FUNCTION__, texImage->Width, texImage->Height, texImage->Depth,
+ pixels);
+
+ _mesa_store_teximage(ctx, dims, texImage,
+ format, type, pixels, unpack);
+}
+
+
+/**
+ * Binds a region to a texture image, like it was uploaded by glTexImage2D().
+ *
+ * Used for GLX_EXT_texture_from_pixmap and EGL image extensions.
+ */
+static void
+intel_set_texture_image_region(struct gl_context *ctx,
+ struct gl_texture_image *image,
+ struct intel_region *region,
+ GLenum target,
+ GLenum internalFormat,
+ gl_format format,
+ uint32_t offset)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_image *intel_image = intel_texture_image(image);
+ struct gl_texture_object *texobj = image->TexObject;
+ struct intel_texture_object *intel_texobj = intel_texture_object(texobj);
+
+ _mesa_init_teximage_fields(&intel->ctx, image,
+ region->width, region->height, 1,
+ 0, internalFormat, format);
+
+ ctx->Driver.FreeTextureImageBuffer(ctx, image);
+
+ intel_image->mt = intel_miptree_create_for_region(intel, target,
+ image->TexFormat,
+ region);
+ if (intel_image->mt == NULL)
+ return;
+
+ intel_image->mt->offset = offset;
+ intel_image->base.RowStride = region->pitch;
+
+ /* Immediately validate the image to the object. */
+ intel_miptree_reference(&intel_texobj->mt, intel_image->mt);
+}
+
+void
+intelSetTexBuffer2(__DRIcontext *pDRICtx, GLint target,
+ GLint texture_format,
+ __DRIdrawable *dPriv)
+{
+ struct gl_framebuffer *fb = dPriv->driverPrivate;
+ struct intel_context *intel = pDRICtx->driverPrivate;
+ struct gl_context *ctx = &intel->ctx;
+ struct intel_texture_object *intelObj;
+ struct intel_renderbuffer *rb;
+ struct gl_texture_object *texObj;
+ struct gl_texture_image *texImage;
+ int level = 0, internalFormat = 0;
+ gl_format texFormat = MESA_FORMAT_NONE;
+
+ texObj = _mesa_get_current_tex_object(ctx, target);
+ intelObj = intel_texture_object(texObj);
+
+ if (!intelObj)
+ return;
+
+ if (dPriv->lastStamp != dPriv->dri2.stamp ||
+ !pDRICtx->driScreenPriv->dri2.useInvalidate)
+ intel_update_renderbuffers(pDRICtx, dPriv);
+
+ rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
+ /* If the region isn't set, then intel_update_renderbuffers was unable
+ * to get the buffers for the drawable.
+ */
+ if (!rb || !rb->mt)
+ return;
+
+ if (rb->mt->cpp == 4) {
+ if (texture_format == __DRI_TEXTURE_FORMAT_RGB) {
+ internalFormat = GL_RGB;
+ texFormat = MESA_FORMAT_XRGB8888;
+ }
+ else {
+ internalFormat = GL_RGBA;
+ texFormat = MESA_FORMAT_ARGB8888;
+ }
+ } else if (rb->mt->cpp == 2) {
+ internalFormat = GL_RGB;
+ texFormat = MESA_FORMAT_RGB565;
+ }
+
+ _mesa_lock_texture(&intel->ctx, texObj);
+ texImage = _mesa_get_tex_image(ctx, texObj, target, level);
+ intel_set_texture_image_region(ctx, texImage, rb->mt->region, target,
+ internalFormat, texFormat, 0);
+ _mesa_unlock_texture(&intel->ctx, texObj);
+}
+
+void
+intelSetTexBuffer(__DRIcontext *pDRICtx, GLint target, __DRIdrawable *dPriv)
+{
+ /* The old interface didn't have the format argument, so copy our
+ * implementation's behavior at the time.
+ */
+ intelSetTexBuffer2(pDRICtx, target, __DRI_TEXTURE_FORMAT_RGBA, dPriv);
+}
+
+#if FEATURE_OES_EGL_image
+static void
+intel_image_target_texture_2d(struct gl_context *ctx, GLenum target,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage,
+ GLeglImageOES image_handle)
+{
+ struct intel_context *intel = intel_context(ctx);
+ __DRIscreen *screen;
+ __DRIimage *image;
+
+ screen = intel->intelScreen->driScrnPriv;
+ image = screen->dri2.image->lookupEGLImage(screen, image_handle,
+ screen->loaderPrivate);
+ if (image == NULL)
+ return;
+
+ intel_set_texture_image_region(ctx, texImage, image->region,
+ target, image->internal_format,
+ image->format, image->offset);
+}
+#endif
+
+void
+intelInitTextureImageFuncs(struct dd_function_table *functions)
+{
+ functions->TexImage = intelTexImage;
+
+#if FEATURE_OES_EGL_image
+ functions->EGLImageTargetTexture2D = intel_image_target_texture_2d;
+#endif
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_layout.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_layout.c
index fe61b441945..65645bc46a4 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_layout.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_layout.c
@@ -1 +1,206 @@
-../intel/intel_tex_layout.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ * Michel Dänzer <michel@tungstengraphics.com>
+ */
+
+#include "intel_mipmap_tree.h"
+#include "intel_tex_layout.h"
+#include "intel_context.h"
+
+#include "main/image.h"
+#include "main/macros.h"
+
+static unsigned int
+intel_horizontal_texture_alignment_unit(struct intel_context *intel,
+ gl_format format)
+{
+ /**
+ * From the "Alignment Unit Size" section of various specs, namely:
+ * - Gen3 Spec: "Memory Data Formats" Volume, Section 1.20.1.4
+ * - i965 and G45 PRMs: Volume 1, Section 6.17.3.4.
+ * - Ironlake and Sandybridge PRMs: Volume 1, Part 1, Section 7.18.3.4
+ * - BSpec (for Ivybridge and slight variations in separate stencil)
+ *
+ * +----------------------------------------------------------------------+
+ * | | alignment unit width ("i") |
+ * | Surface Property |-----------------------------|
+ * | | 915 | 965 | ILK | SNB | IVB |
+ * +----------------------------------------------------------------------+
+ * | YUV 4:2:2 format | 8 | 4 | 4 | 4 | 4 |
+ * | BC1-5 compressed format (DXTn/S3TC) | 4 | 4 | 4 | 4 | 4 |
+ * | FXT1 compressed format | 8 | 8 | 8 | 8 | 8 |
+ * | Depth Buffer (16-bit) | 4 | 4 | 4 | 4 | 8 |
+ * | Depth Buffer (other) | 4 | 4 | 4 | 4 | 4 |
+ * | Separate Stencil Buffer | N/A | N/A | 8 | 8 | 8 |
+ * | All Others | 4 | 4 | 4 | 4 | 4 |
+ * +----------------------------------------------------------------------+
+ *
+ * On IVB+, non-special cases can be overridden by setting the SURFACE_STATE
+ * "Surface Horizontal Alignment" field to HALIGN_4 or HALIGN_8.
+ */
+ if (_mesa_is_format_compressed(format)) {
+ /* The hardware alignment requirements for compressed textures
+ * happen to match the block boundaries.
+ */
+ unsigned int i, j;
+ _mesa_get_format_block_size(format, &i, &j);
+ return i;
+ }
+
+ if (format == MESA_FORMAT_S8)
+ return 8;
+
+ if (intel->gen >= 7 && format == MESA_FORMAT_Z16)
+ return 8;
+
+ return 4;
+}
+
+static unsigned int
+intel_vertical_texture_alignment_unit(struct intel_context *intel,
+ gl_format format)
+{
+ /**
+ * From the "Alignment Unit Size" section of various specs, namely:
+ * - Gen3 Spec: "Memory Data Formats" Volume, Section 1.20.1.4
+ * - i965 and G45 PRMs: Volume 1, Section 6.17.3.4.
+ * - Ironlake and Sandybridge PRMs: Volume 1, Part 1, Section 7.18.3.4
+ * - BSpec (for Ivybridge and slight variations in separate stencil)
+ *
+ * +----------------------------------------------------------------------+
+ * | | alignment unit height ("j") |
+ * | Surface Property |-----------------------------|
+ * | | 915 | 965 | ILK | SNB | IVB |
+ * +----------------------------------------------------------------------+
+ * | BC1-5 compressed format (DXTn/S3TC) | 4 | 4 | 4 | 4 | 4 |
+ * | FXT1 compressed format | 4 | 4 | 4 | 4 | 4 |
+ * | Depth Buffer | 2 | 2 | 2 | 4 | 4 |
+ * | Separate Stencil Buffer | N/A | N/A | N/A | 4 | 8 |
+ * | Multisampled (4x or 8x) render target | N/A | N/A | N/A | 4 | 4 |
+ * | All Others | 2 | 2 | 2 | 2 | 2 |
+ * +----------------------------------------------------------------------+
+ *
+ * On SNB+, non-special cases can be overridden by setting the SURFACE_STATE
+ * "Surface Vertical Alignment" field to VALIGN_2 or VALIGN_4.
+ *
+ * We currently don't support multisampling.
+ */
+ if (_mesa_is_format_compressed(format))
+ return 4;
+
+ if (format == MESA_FORMAT_S8)
+ return intel->gen >= 7 ? 8 : 4;
+
+ GLenum base_format = _mesa_get_format_base_format(format);
+
+ if (intel->gen >= 6 &&
+ (base_format == GL_DEPTH_COMPONENT ||
+ base_format == GL_DEPTH_STENCIL)) {
+ return 4;
+ }
+
+ return 2;
+}
+
+void
+intel_get_texture_alignment_unit(struct intel_context *intel,
+ gl_format format,
+ unsigned int *w, unsigned int *h)
+{
+ *w = intel_horizontal_texture_alignment_unit(intel, format);
+ *h = intel_vertical_texture_alignment_unit(intel, format);
+}
+
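+/* Worked examples (illustrative): MESA_FORMAT_RGBA_DXT1 has a 4x4 block,
+ * so (*w, *h) = (4, 4); MESA_FORMAT_Z16 on gen7 gets (8, 4); an ordinary
+ * color format such as MESA_FORMAT_ARGB8888 gets the default (4, 2).
+ */
+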
+void i945_miptree_layout_2d(struct intel_mipmap_tree *mt)
+{
+ GLuint level;
+ GLuint x = 0;
+ GLuint y = 0;
+ GLuint width = mt->width0;
+ GLuint height = mt->height0;
+ GLuint depth = mt->depth0; /* number of array layers. */
+
+ mt->total_width = mt->width0;
+
+ if (mt->compressed) {
+ mt->total_width = ALIGN(mt->width0, mt->align_w);
+ }
+
+   /* May need to adjust width to accommodate the placement of
+ * the 2nd mipmap. This occurs when the alignment
+ * constraints of mipmap placement push the right edge of the
+ * 2nd mipmap out past the width of its parent.
+ */
+ if (mt->first_level != mt->last_level) {
+ GLuint mip1_width;
+
+ if (mt->compressed) {
+ mip1_width = ALIGN(minify(mt->width0), mt->align_w)
+ + ALIGN(minify(minify(mt->width0)), mt->align_w);
+ } else {
+ mip1_width = ALIGN(minify(mt->width0), mt->align_w)
+ + minify(minify(mt->width0));
+ }
+
+ if (mip1_width > mt->total_width) {
+ mt->total_width = mip1_width;
+ }
+ }
+
+ mt->total_height = 0;
+
+ for ( level = mt->first_level ; level <= mt->last_level ; level++ ) {
+ GLuint img_height;
+
+ intel_miptree_set_level_info(mt, level, x, y, width,
+ height, depth);
+
+ img_height = ALIGN(height, mt->align_h);
+ if (mt->compressed)
+ img_height /= mt->align_h;
+
+ /* Because the images are packed better, the final offset
+ * might not be the maximal one:
+ */
+ mt->total_height = MAX2(mt->total_height, y + img_height);
+
+ /* Layout_below: step right after second mipmap.
+ */
+ if (level == mt->first_level + 1) {
+ x += ALIGN(width, mt->align_w);
+ }
+ else {
+ y += img_height;
+ }
+
+ width = minify(width);
+ height = minify(height);
+ }
+}
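+
+/* Worked example (illustrative): an uncompressed 8x8 texture with four
+ * levels, align_w = 4, align_h = 2.  mip1_width = ALIGN(4, 4) + 2 = 6,
+ * which does not exceed width0, so total_width stays 8.  The loop then
+ * places:
+ *
+ *   level 0 (8x8) at (0, 0),  y += 8
+ *   level 1 (4x4) at (0, 8),  x += ALIGN(4, 4) = 4
+ *   level 2 (2x2) at (4, 8),  y += 2
+ *   level 3 (1x1) at (4, 10)
+ *
+ * giving total_width = 8 and total_height = 12.
+ */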
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_subimage.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_subimage.c
index b3a8a3d7ca7..ae4b3bca305 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_subimage.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_subimage.c
@@ -1 +1,177 @@
-../intel/intel_tex_subimage.c \ No newline at end of file
+
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/mtypes.h"
+#include "main/pbo.h"
+#include "main/texobj.h"
+#include "main/texstore.h"
+#include "main/texcompress.h"
+#include "main/enums.h"
+
+#include "intel_context.h"
+#include "intel_tex.h"
+#include "intel_mipmap_tree.h"
+#include "intel_blit.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+static bool
+intel_blit_texsubimage(struct gl_context * ctx,
+ struct gl_texture_image *texImage,
+ GLint xoffset, GLint yoffset,
+ GLint width, GLint height,
+ GLenum format, GLenum type, const void *pixels,
+ const struct gl_pixelstore_attrib *packing)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_image *intelImage = intel_texture_image(texImage);
+ GLuint dstRowStride = 0;
+ drm_intel_bo *temp_bo = NULL;
+ unsigned int blit_x = 0, blit_y = 0;
+ unsigned long pitch;
+ uint32_t tiling_mode = I915_TILING_NONE;
+ GLubyte *dstMap;
+
+ /* Try to do a blit upload of the subimage if the texture is
+ * currently busy.
+ */
+ if (!intelImage->mt)
+ return false;
+
+ /* The blitter can't handle Y tiling */
+ if (intelImage->mt->region->tiling == I915_TILING_Y)
+ return false;
+
+ if (texImage->TexObject->Target != GL_TEXTURE_2D)
+ return false;
+
+ /* On gen6, it's probably not worth swapping to the blit ring to do
+ * this because of all the overhead involved.
+ */
+ if (intel->gen >= 6)
+ return false;
+
+ if (!drm_intel_bo_busy(intelImage->mt->region->bo))
+ return false;
+
+ DBG("BLT subimage %s target %s level %d offset %d,%d %dx%d\n",
+ __FUNCTION__,
+ _mesa_lookup_enum_by_nr(texImage->TexObject->Target),
+ texImage->Level, xoffset, yoffset, width, height);
+
+ pixels = _mesa_validate_pbo_teximage(ctx, 2, width, height, 1,
+ format, type, pixels, packing,
+ "glTexSubImage");
+ if (!pixels)
+ return false;
+
+ temp_bo = drm_intel_bo_alloc_tiled(intel->bufmgr,
+ "subimage blit bo",
+ width, height,
+ intelImage->mt->cpp,
+ &tiling_mode,
+ &pitch,
+ 0);
+ if (temp_bo == NULL) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage");
+ return false;
+ }
+
+ if (drm_intel_gem_bo_map_gtt(temp_bo)) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage");
+ return false;
+ }
+
+ dstMap = temp_bo->virtual;
+ dstRowStride = pitch;
+
+ intel_miptree_get_image_offset(intelImage->mt, texImage->Level,
+ intelImage->base.Base.Face, 0,
+ &blit_x, &blit_y);
+ blit_x += xoffset;
+ blit_y += yoffset;
+
+ if (!_mesa_texstore(ctx, 2, texImage->_BaseFormat,
+ texImage->TexFormat,
+ dstRowStride,
+ &dstMap,
+ width, height, 1,
+ format, type, pixels, packing)) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage");
+ }
+
+ bool ret;
+ unsigned int dst_pitch = intelImage->mt->region->pitch *
+ intelImage->mt->cpp;
+
+ drm_intel_gem_bo_unmap_gtt(temp_bo);
+
+ ret = intelEmitCopyBlit(intel,
+ intelImage->mt->cpp,
+ dstRowStride / intelImage->mt->cpp,
+ temp_bo, 0, false,
+ dst_pitch / intelImage->mt->cpp,
+ intelImage->mt->region->bo, 0,
+ intelImage->mt->region->tiling,
+ 0, 0, blit_x, blit_y, width, height,
+ GL_COPY);
+ assert(ret);
+
+ drm_intel_bo_unreference(temp_bo);
+ _mesa_unmap_teximage_pbo(ctx, packing);
+
+ return true;
+}
+
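+/* Illustrative scenario: without this path,
+ *
+ *   glDrawArrays(...);                       // sources the texture
+ *   glTexSubImage2D(GL_TEXTURE_2D, 0, ...);  // updates it right after
+ *
+ * would stall in a GTT map until the draw finished.  Storing the texels
+ * into a fresh, never-busy temp_bo and queueing a blit keeps CPU and GPU
+ * decoupled (pre-gen6, non-Y-tiled, 2D targets only, per the checks
+ * above).
+ */
+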
+static void
+intelTexSubImage(struct gl_context * ctx,
+ GLuint dims,
+ struct gl_texture_image *texImage,
+ GLint xoffset, GLint yoffset, GLint zoffset,
+ GLsizei width, GLsizei height, GLsizei depth,
+ GLenum format, GLenum type,
+ const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing)
+{
+ /* The intel_blit_texsubimage() function only handles 2D images */
+ if (dims != 2 || !intel_blit_texsubimage(ctx, texImage,
+ xoffset, yoffset,
+ width, height,
+ format, type, pixels, packing)) {
+ _mesa_store_texsubimage(ctx, dims, texImage,
+ xoffset, yoffset, zoffset,
+ width, height, depth,
+ format, type, pixels, packing);
+ }
+}
+
+void
+intelInitTextureSubImageFuncs(struct dd_function_table *functions)
+{
+ functions->TexSubImage = intelTexSubImage;
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_validate.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_validate.c
index 41a75674c27..578b4173512 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_validate.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/i965/intel_tex_validate.c
@@ -1 +1,218 @@
-../intel/intel_tex_validate.c \ No newline at end of file
+#include "main/mtypes.h"
+#include "main/macros.h"
+#include "main/samplerobj.h"
+#include "main/texobj.h"
+
+#include "intel_context.h"
+#include "intel_mipmap_tree.h"
+#include "intel_blit.h"
+#include "intel_tex.h"
+#include "intel_tex_layout.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+/**
+ * When validating, we only care about the texture images that could
+ * be seen, so for non-mipmapped modes we want to ignore everything
+ * but BaseLevel.
+ */
+static void
+intel_update_max_level(struct intel_texture_object *intelObj,
+ struct gl_sampler_object *sampler)
+{
+ struct gl_texture_object *tObj = &intelObj->base;
+
+ if (sampler->MinFilter == GL_NEAREST ||
+ sampler->MinFilter == GL_LINEAR) {
+ intelObj->_MaxLevel = tObj->BaseLevel;
+ } else {
+ intelObj->_MaxLevel = tObj->_MaxLevel;
+ }
+}
+
+/* Ensure the texture object's images are collected into a single
+ * miptree covering all active levels, (re)creating the tree as needed.
+ */
+GLuint
+intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit)
+{
+ struct gl_context *ctx = &intel->ctx;
+ struct gl_texture_object *tObj = intel->ctx.Texture.Unit[unit]._Current;
+ struct intel_texture_object *intelObj = intel_texture_object(tObj);
+ struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
+ GLuint face, i;
+ GLuint nr_faces = 0;
+ struct intel_texture_image *firstImage;
+ int width, height, depth;
+
+ /* TBOs require no validation -- they always just point to their BO. */
+ if (tObj->Target == GL_TEXTURE_BUFFER)
+ return true;
+
+ /* We know/require this is true by now:
+ */
+ assert(intelObj->base._BaseComplete);
+
+ /* What levels must the tree include at a minimum?
+ */
+ intel_update_max_level(intelObj, sampler);
+ firstImage = intel_texture_image(tObj->Image[0][tObj->BaseLevel]);
+
+ /* Check tree can hold all active levels. Check tree matches
+ * target, imageFormat, etc.
+ *
+ * For pre-gen4, we have to match first_level == tObj->BaseLevel,
+ * because we don't have the control that gen4 does to make min/mag
+ * determination happen at a nonzero (hardware) baselevel. Because
+ * of that, we just always relayout on baselevel change.
+ */
+ if (intelObj->mt &&
+ (!intel_miptree_match_image(intelObj->mt, &firstImage->base.Base) ||
+ intelObj->mt->first_level != tObj->BaseLevel ||
+ intelObj->mt->last_level < intelObj->_MaxLevel)) {
+ intel_miptree_release(&intelObj->mt);
+ }
+
+
+ /* May need to create a new tree:
+ */
+ if (!intelObj->mt) {
+ intel_miptree_get_dimensions_for_image(&firstImage->base.Base,
+ &width, &height, &depth);
+
+ intelObj->mt = intel_miptree_create(intel,
+ intelObj->base.Target,
+ firstImage->base.Base.TexFormat,
+ tObj->BaseLevel,
+ intelObj->_MaxLevel,
+ width,
+ height,
+ depth,
+ true,
+ 0 /* num_samples */,
+ INTEL_MSAA_LAYOUT_NONE);
+ if (!intelObj->mt)
+ return false;
+ }
+
+ /* Pull in any images not in the object's tree:
+ */
+ nr_faces = _mesa_num_tex_faces(intelObj->base.Target);
+ for (face = 0; face < nr_faces; face++) {
+ for (i = tObj->BaseLevel; i <= intelObj->_MaxLevel; i++) {
+ struct intel_texture_image *intelImage =
+ intel_texture_image(intelObj->base.Image[face][i]);
+	 /* stop at the first missing mipmap level */
+ if (intelImage == NULL)
+ break;
+
+ if (intelObj->mt != intelImage->mt) {
+ intel_miptree_copy_teximage(intel, intelImage, intelObj->mt);
+ }
+ }
+ }
+
+ return true;
+}
+
+/**
+ * \param mode bitmask of GL_MAP_READ_BIT, GL_MAP_WRITE_BIT
+ */
+static void
+intel_tex_map_image_for_swrast(struct intel_context *intel,
+ struct intel_texture_image *intel_image,
+ GLbitfield mode)
+{
+ int level;
+ int face;
+ struct intel_mipmap_tree *mt;
+ unsigned int x, y;
+
+ if (!intel_image || !intel_image->mt)
+ return;
+
+ level = intel_image->base.Base.Level;
+ face = intel_image->base.Base.Face;
+ mt = intel_image->mt;
+
+ for (int i = 0; i < mt->level[level].depth; i++)
+ intel_miptree_slice_resolve_depth(intel, mt, level, i);
+
+ if (mt->target == GL_TEXTURE_3D ||
+ mt->target == GL_TEXTURE_2D_ARRAY ||
+ mt->target == GL_TEXTURE_1D_ARRAY) {
+ int i;
+
+ /* ImageOffsets[] is only used for swrast's fetch_texel_3d, so we can't
+ * share code with the normal path.
+ */
+ for (i = 0; i < mt->level[level].depth; i++) {
+ intel_miptree_get_image_offset(mt, level, face, i, &x, &y);
+ intel_image->base.ImageOffsets[i] = x + y * mt->region->pitch;
+ }
+
+ DBG("%s \n", __FUNCTION__);
+
+ intel_image->base.Map = intel_region_map(intel, mt->region, mode);
+ } else {
+ assert(intel_image->base.Base.Depth == 1);
+ intel_miptree_get_image_offset(mt, level, face, 0, &x, &y);
+
+ DBG("%s: (%d,%d) -> (%d, %d)/%d\n",
+ __FUNCTION__, face, level, x, y, mt->region->pitch * mt->cpp);
+
+ intel_image->base.Map = intel_region_map(intel, mt->region, mode) +
+ (x + y * mt->region->pitch) * mt->cpp;
+ }
+
+ intel_image->base.RowStride = mt->region->pitch;
+}
+
+static void
+intel_tex_unmap_image_for_swrast(struct intel_context *intel,
+ struct intel_texture_image *intel_image)
+{
+ if (intel_image && intel_image->mt) {
+ intel_region_unmap(intel, intel_image->mt->region);
+ intel_image->base.Map = NULL;
+ }
+}
+
+/**
+ * \param mode bitmask of GL_MAP_READ_BIT, GL_MAP_WRITE_BIT
+ */
+void
+intel_tex_map_images(struct intel_context *intel,
+ struct intel_texture_object *intelObj,
+ GLbitfield mode)
+{
+ GLuint nr_faces = _mesa_num_tex_faces(intelObj->base.Target);
+ int i, face;
+
+ DBG("%s\n", __FUNCTION__);
+
+ for (i = intelObj->base.BaseLevel; i <= intelObj->_MaxLevel; i++) {
+ for (face = 0; face < nr_faces; face++) {
+ struct intel_texture_image *intel_image =
+ intel_texture_image(intelObj->base.Image[face][i]);
+
+ intel_tex_map_image_for_swrast(intel, intel_image, mode);
+ }
+ }
+}
+
+void
+intel_tex_unmap_images(struct intel_context *intel,
+ struct intel_texture_object *intelObj)
+{
+ GLuint nr_faces = _mesa_num_tex_faces(intelObj->base.Target);
+ int i, face;
+
+ for (i = intelObj->base.BaseLevel; i <= intelObj->_MaxLevel; i++) {
+ for (face = 0; face < nr_faces; face++) {
+ struct intel_texture_image *intel_image =
+ intel_texture_image(intelObj->base.Image[face][i]);
+
+ intel_tex_unmap_image_for_swrast(intel, intel_image);
+ }
+ }
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_buffer_objects.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_buffer_objects.c
index f6a5f664701..5abc52ba3f7 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_buffer_objects.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_buffer_objects.c
@@ -1 +1,233 @@
-../radeon/radeon_buffer_objects.c \ No newline at end of file
+/*
+ * Copyright 2009 Maciej Cencora <m.cencora@gmail.com>
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "radeon_buffer_objects.h"
+
+#include "main/imports.h"
+#include "main/mtypes.h"
+#include "main/bufferobj.h"
+
+#include "radeon_common.h"
+
+struct radeon_buffer_object *
+get_radeon_buffer_object(struct gl_buffer_object *obj)
+{
+ return (struct radeon_buffer_object *) obj;
+}
+
+static struct gl_buffer_object *
+radeonNewBufferObject(struct gl_context * ctx,
+ GLuint name,
+ GLenum target)
+{
+ struct radeon_buffer_object *obj = CALLOC_STRUCT(radeon_buffer_object);
+
+ _mesa_initialize_buffer_object(ctx, &obj->Base, name, target);
+
+ obj->bo = NULL;
+
+ return &obj->Base;
+}
+
+/**
+ * Called via glDeleteBuffersARB().
+ */
+static void
+radeonDeleteBufferObject(struct gl_context * ctx,
+ struct gl_buffer_object *obj)
+{
+ struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);
+
+ if (obj->Pointer) {
+ radeon_bo_unmap(radeon_obj->bo);
+ }
+
+ if (radeon_obj->bo) {
+ radeon_bo_unref(radeon_obj->bo);
+ }
+
+ free(radeon_obj);
+}
+
+
+/**
+ * Allocate space for and store data in a buffer object. Any data that was
+ * previously stored in the buffer object is lost. If data is NULL,
+ * memory will be allocated, but no copy will occur.
+ * Called via ctx->Driver.BufferData().
+ * \return GL_TRUE for success, GL_FALSE if out of memory
+ */
+static GLboolean
+radeonBufferData(struct gl_context * ctx,
+ GLenum target,
+ GLsizeiptrARB size,
+ const GLvoid * data,
+ GLenum usage,
+ struct gl_buffer_object *obj)
+{
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+ struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);
+
+ radeon_obj->Base.Size = size;
+ radeon_obj->Base.Usage = usage;
+
+ if (radeon_obj->bo != NULL) {
+ radeon_bo_unref(radeon_obj->bo);
+ radeon_obj->bo = NULL;
+ }
+
+ if (size != 0) {
+ radeon_obj->bo = radeon_bo_open(radeon->radeonScreen->bom,
+ 0,
+ size,
+ 32,
+ RADEON_GEM_DOMAIN_GTT,
+ 0);
+
+ if (!radeon_obj->bo)
+ return GL_FALSE;
+
+ if (data != NULL) {
+ radeon_bo_map(radeon_obj->bo, GL_TRUE);
+
+ memcpy(radeon_obj->bo->ptr, data, size);
+
+ radeon_bo_unmap(radeon_obj->bo);
+ }
+ }
+ return GL_TRUE;
+}
+
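+/* Typical client sequence served by the function above (illustrative;
+ * "vbo" and the size are hypothetical):
+ *
+ *   glBindBuffer(GL_ARRAY_BUFFER, vbo);
+ *   glBufferData(GL_ARRAY_BUFFER, 4096, vertices, GL_STATIC_DRAW);
+ *
+ * This allocates a 4096-byte BO (32-byte aligned, GTT domain) and, since
+ * data is non-NULL, maps it once to copy the bytes in.
+ */
+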
+/**
+ * Replace data in a subrange of buffer object. If the data range
+ * specified by size + offset extends beyond the end of the buffer or
+ * if data is NULL, no copy is performed.
+ * Called via glBufferSubDataARB().
+ */
+static void
+radeonBufferSubData(struct gl_context * ctx,
+ GLintptrARB offset,
+ GLsizeiptrARB size,
+ const GLvoid * data,
+ struct gl_buffer_object *obj)
+{
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+ struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);
+
+ if (radeon_bo_is_referenced_by_cs(radeon_obj->bo, radeon->cmdbuf.cs)) {
+ radeon_firevertices(radeon);
+ }
+
+ radeon_bo_map(radeon_obj->bo, GL_TRUE);
+
+ memcpy(radeon_obj->bo->ptr + offset, data, size);
+
+ radeon_bo_unmap(radeon_obj->bo);
+}
+
+/**
+ * Called via glGetBufferSubDataARB()
+ */
+static void
+radeonGetBufferSubData(struct gl_context * ctx,
+ GLintptrARB offset,
+ GLsizeiptrARB size,
+ GLvoid * data,
+ struct gl_buffer_object *obj)
+{
+ struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);
+
+ radeon_bo_map(radeon_obj->bo, GL_FALSE);
+
+ memcpy(data, radeon_obj->bo->ptr + offset, size);
+
+ radeon_bo_unmap(radeon_obj->bo);
+}
+
+/**
+ * Called via glMapBuffer() and glMapBufferRange()
+ */
+static void *
+radeonMapBufferRange(struct gl_context * ctx,
+ GLintptr offset, GLsizeiptr length,
+ GLbitfield access, struct gl_buffer_object *obj)
+{
+ struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);
+ const GLboolean write_only =
+ (access & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT)) == GL_MAP_WRITE_BIT;
+
+ if (write_only) {
+ ctx->Driver.Flush(ctx);
+ }
+
+ if (radeon_obj->bo == NULL) {
+ obj->Pointer = NULL;
+ return NULL;
+ }
+
+ obj->Offset = offset;
+ obj->Length = length;
+ obj->AccessFlags = access;
+
+ radeon_bo_map(radeon_obj->bo, write_only);
+
+ obj->Pointer = radeon_obj->bo->ptr + offset;
+ return obj->Pointer;
+}
+
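+/* Illustrative usage (hypothetical names): the access-bit test above
+ * treats this map as write-only, so queued rendering is flushed before
+ * the BO is mapped for writing:
+ *
+ *   void *p = glMapBufferRange(GL_ARRAY_BUFFER, 0, 4096, GL_MAP_WRITE_BIT);
+ *   memcpy(p, vertices, 4096);
+ *   glUnmapBuffer(GL_ARRAY_BUFFER);
+ */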
+
+/**
+ * Called via glUnmapBufferARB()
+ */
+static GLboolean
+radeonUnmapBuffer(struct gl_context * ctx,
+ struct gl_buffer_object *obj)
+{
+ struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);
+
+ if (radeon_obj->bo != NULL) {
+ radeon_bo_unmap(radeon_obj->bo);
+ }
+
+ obj->Pointer = NULL;
+ obj->Offset = 0;
+ obj->Length = 0;
+
+ return GL_TRUE;
+}
+
+void
+radeonInitBufferObjectFuncs(struct dd_function_table *functions)
+{
+ functions->NewBufferObject = radeonNewBufferObject;
+ functions->DeleteBuffer = radeonDeleteBufferObject;
+ functions->BufferData = radeonBufferData;
+ functions->BufferSubData = radeonBufferSubData;
+ functions->GetBufferSubData = radeonGetBufferSubData;
+ functions->MapBufferRange = radeonMapBufferRange;
+ functions->UnmapBuffer = radeonUnmapBuffer;
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_buffer_objects.h b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_buffer_objects.h
index 2f134fd17b8..d681960825a 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_buffer_objects.h
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_buffer_objects.h
@@ -1 +1,52 @@
-../radeon/radeon_buffer_objects.h \ No newline at end of file
+/*
+ * Copyright 2009 Maciej Cencora <m.cencora@gmail.com>
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef RADEON_BUFFER_OBJECTS_H
+#define RADEON_BUFFER_OBJECTS_H
+
+#include "main/mtypes.h"
+
+struct radeon_bo;
+
+/**
+ * Radeon vertex/pixel buffer object, derived from Mesa's gl_buffer_object.
+ */
+struct radeon_buffer_object
+{
+ struct gl_buffer_object Base;
+ struct radeon_bo *bo;
+};
+
+struct radeon_buffer_object *
+get_radeon_buffer_object(struct gl_buffer_object *obj);
+
+/**
+ * Hook the bufferobject implementation into mesa:
+ */
+void radeonInitBufferObjectFuncs(struct dd_function_table *functions);
+
+#endif
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_chipset.h b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_chipset.h
index eba99001ff8..023c12c790d 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_chipset.h
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_chipset.h
@@ -1 +1,41 @@
-../radeon/radeon_chipset.h \ No newline at end of file
+#ifndef _RADEON_CHIPSET_H
+#define _RADEON_CHIPSET_H
+
+/* General chip classes:
+ * r100 includes R100, RV100, RV200, RS100, RS200, RS250.
+ * r200 includes R200, RV250, RV280, RS300.
+ * (RS* denotes IGP)
+ */
+
+enum {
+#define CHIPSET(id, name, family) PCI_CHIP_##name = id,
+#if defined(RADEON_R100)
+#include "pci_ids/radeon_pci_ids.h"
+#elif defined(RADEON_R200)
+#include "pci_ids/r200_pci_ids.h"
+#endif
+#undef CHIPSET
+};
+
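+/* Sketch of the x-macro expansion above (illustrative; assuming the PCI
+ * ID list contains an entry like CHIPSET(0x5148, R200_QH, R200)):
+ *
+ *   enum {
+ *      PCI_CHIP_R200_QH = 0x5148,
+ *      ...
+ *   };
+ */
+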
+enum {
+#if defined(RADEON_R100)
+ CHIP_FAMILY_R100,
+ CHIP_FAMILY_RV100,
+ CHIP_FAMILY_RS100,
+ CHIP_FAMILY_RV200,
+ CHIP_FAMILY_RS200,
+#elif defined(RADEON_R200)
+ CHIP_FAMILY_R200,
+ CHIP_FAMILY_RV250,
+ CHIP_FAMILY_RS300,
+ CHIP_FAMILY_RV280,
+#endif
+ CHIP_FAMILY_LAST
+};
+
+#define RADEON_CHIPSET_TCL (1 << 0) /* tcl support - any radeon */
+#define RADEON_CHIPSET_BROKEN_STENCIL (1 << 1) /* r100 stencil bug */
+#define R200_CHIPSET_YCBCR_BROKEN (1 << 2) /* r200 ycbcr bug */
+#define RADEON_CHIPSET_DEPTH_ALWAYS_TILED (1 << 3) /* M7 and R200s */
+
+#endif /* _RADEON_CHIPSET_H */
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_cmdbuf.h b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_cmdbuf.h
index a799e1dc6df..9f8d6cc49de 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_cmdbuf.h
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_cmdbuf.h
@@ -1 +1,113 @@
-../radeon/radeon_cmdbuf.h \ No newline at end of file
+#ifndef COMMON_CMDBUF_H
+#define COMMON_CMDBUF_H
+
+GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller);
+int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller);
+int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller);
+void rcommonInitCmdBuf(radeonContextPtr rmesa);
+void rcommonDestroyCmdBuf(radeonContextPtr rmesa);
+
+void rcommonBeginBatch(radeonContextPtr rmesa,
+ int n,
+ int dostate,
+ const char *file,
+ const char *function,
+ int line);
+
+/* +r6/r7 : code here moved */
+
+#define CP_PACKET2 (2 << 30)
+#define CP_PACKET0(reg, n) (RADEON_CP_PACKET0 | ((n)<<16) | ((reg)>>2))
+#define CP_PACKET0_ONE(reg, n) (RADEON_CP_PACKET0 | RADEON_CP_PACKET0_ONE_REG_WR | ((n)<<16) | ((reg)>>2))
+#define CP_PACKET3(pkt, n) (RADEON_CP_PACKET3 | (pkt) | ((n) << 16))
+
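+/* Header layout sketch (assuming RADEON_CP_PACKET0 == 0, as in the DRM
+ * headers): bits 12:0 carry reg >> 2 and bits 29:16 carry the dword
+ * count minus one -- the same fields radeon_print_state_atom() decodes
+ * with the 0x1FFF and 0x3FFF0000 masks later in this patch.  E.g.:
+ *
+ *   CP_PACKET0(0x1434, 0) == 0x0000050d   // single write to reg 0x1434
+ */
+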
+/**
+ * Every function writing to the command buffer needs to declare this
+ * to get the necessary local variables.
+ */
+#define BATCH_LOCALS(rmesa) \
+ const radeonContextPtr b_l_rmesa = rmesa
+
+/**
+ * Prepare writing n dwords to the command buffer,
+ * including producing any necessary state emits on buffer wraparound.
+ */
+#define BEGIN_BATCH(n) rcommonBeginBatch(b_l_rmesa, n, 1, __FILE__, __FUNCTION__, __LINE__)
+
+/**
+ * Same as BEGIN_BATCH, but do not cause automatic state emits.
+ */
+#define BEGIN_BATCH_NO_AUTOSTATE(n) rcommonBeginBatch(b_l_rmesa, n, 0, __FILE__, __FUNCTION__, __LINE__)
+
+/**
+ * Write one dword to the command buffer.
+ */
+#define OUT_BATCH(data) \
+ do { \
+ radeon_cs_write_dword(b_l_rmesa->cmdbuf.cs, data);\
+ } while(0)
+
+/**
+ * Write a relocated dword to the command buffer.
+ */
+#define OUT_BATCH_RELOC(data, bo, offset, rd, wd, flags) \
+ do { \
+ int __offset = (offset); \
+ if (0 && __offset) { \
+ fprintf(stderr, "(%s:%s:%d) offset : %d\n", \
+ __FILE__, __FUNCTION__, __LINE__, __offset); \
+ } \
+ radeon_cs_write_dword(b_l_rmesa->cmdbuf.cs, __offset); \
+ radeon_cs_write_reloc(b_l_rmesa->cmdbuf.cs, \
+ bo, rd, wd, flags); \
+ } while(0)
+
+
+/**
+ * Write n dwords from ptr to the command buffer.
+ */
+#define OUT_BATCH_TABLE(ptr,n) \
+ do { \
+ radeon_cs_write_table(b_l_rmesa->cmdbuf.cs, (ptr), (n));\
+ } while(0)
+
+/**
+ * Finish writing dwords to the command buffer.
+ * The number of (direct or indirect) OUT_BATCH calls between the previous
+ * BEGIN_BATCH and END_BATCH must match the number specified at BEGIN_BATCH time.
+ */
+#define END_BATCH() \
+ do { \
+ radeon_cs_end(b_l_rmesa->cmdbuf.cs, __FILE__, __FUNCTION__, __LINE__);\
+ } while(0)
+
+/**
+ * After the last END_BATCH() of rendering, this indicates that flushing
+ * the command buffer now is okay.
+ */
+#define COMMIT_BATCH() \
+ do { \
+ } while(0)
+
+
+/** Single register write to command buffer; requires 2 dwords. */
+#define OUT_BATCH_REGVAL(reg, val) \
+ OUT_BATCH(cmdpacket0(b_l_rmesa->radeonScreen, (reg), 1)); \
+ OUT_BATCH((val))
+
+/** Contiguous register range write to command buffer; requires 1 dword,
+ * expects count dwords afterwards for register contents. */
+#define OUT_BATCH_REGSEQ(reg, count) \
+ OUT_BATCH(cmdpacket0(b_l_rmesa->radeonScreen, (reg), (count)))
+
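+/* Putting the macros together (illustrative; REG_FOO is a placeholder,
+ * not a real register): emitting two registers costs four dwords:
+ *
+ *   BATCH_LOCALS(rmesa);
+ *   BEGIN_BATCH(4);
+ *   OUT_BATCH_REGVAL(REG_FOO, 0x1);      // 2 dwords: header + value
+ *   OUT_BATCH_REGSEQ(REG_FOO + 4, 1);    // 1 dword: header for one reg
+ *   OUT_BATCH(0x2);                      // 1 dword: the register value
+ *   END_BATCH();
+ */
+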
+/* +r6/r7 : code here moved */
+
+/* Fire the buffered vertices no matter what.
+ */
+static INLINE void radeon_firevertices(radeonContextPtr radeon)
+{
+ if (radeon->cmdbuf.cs->cdw || radeon->dma.flush )
+ radeon->glCtx->Driver.Flush(radeon->glCtx); /* +r6/r7 */
+}
+
+#endif
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_common.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_common.c
index 67b19ba940d..c9f9b207811 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_common.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_common.c
@@ -1 +1,791 @@
-../radeon/radeon_common.c \ No newline at end of file
+/**************************************************************************
+
+Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
+
+The Weather Channel (TM) funded Tungsten Graphics to develop the
+initial release of the Radeon 8500 driver under the XFree86 license.
+This notice must be preserved.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+/*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+/*
+ - Scissor implementation
+ - buffer swap/copy ioctls
+ - finish/flush
+ - state emission
+ - cmdbuffer management
+*/
+
+#include <errno.h>
+#include "main/glheader.h"
+#include "main/imports.h"
+#include "main/context.h"
+#include "main/enums.h"
+#include "main/fbobject.h"
+#include "main/framebuffer.h"
+#include "main/renderbuffer.h"
+#include "drivers/common/meta.h"
+
+#include "radeon_common.h"
+#include "radeon_drm.h"
+#include "radeon_queryobj.h"
+
+/**
+ * Enable verbose debug output for emit code.
+ * 0 no output
+ * 1 most output
+ * 2 also print state values
+ */
+#define RADEON_CMDBUF 0
+
+/* =============================================================
+ * Scissoring
+ */
+
+/**
+ * Update cliprects and scissors.
+ */
+void radeonSetCliprects(radeonContextPtr radeon)
+{
+ __DRIdrawable *const drawable = radeon_get_drawable(radeon);
+ __DRIdrawable *const readable = radeon_get_readable(radeon);
+
+ if(drawable == NULL && readable == NULL)
+ return;
+
+ struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
+ struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
+
+ if ((draw_rfb->base.Width != drawable->w) ||
+ (draw_rfb->base.Height != drawable->h)) {
+ _mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
+ drawable->w, drawable->h);
+ draw_rfb->base.Initialized = GL_TRUE;
+ }
+
+ if (drawable != readable) {
+ if ((read_rfb->base.Width != readable->w) ||
+ (read_rfb->base.Height != readable->h)) {
+ _mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
+ readable->w, readable->h);
+ read_rfb->base.Initialized = GL_TRUE;
+ }
+ }
+
+ if (radeon->state.scissor.enabled)
+ radeonUpdateScissor(radeon->glCtx);
+
+}
+
+
+
+void radeonUpdateScissor( struct gl_context *ctx )
+{
+ radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
+ GLint x = ctx->Scissor.X, y = ctx->Scissor.Y;
+ GLsizei w = ctx->Scissor.Width, h = ctx->Scissor.Height;
+ int x1, y1, x2, y2;
+ int min_x, min_y, max_x, max_y;
+
+ if (!ctx->DrawBuffer)
+ return;
+ min_x = min_y = 0;
+ max_x = ctx->DrawBuffer->Width - 1;
+ max_y = ctx->DrawBuffer->Height - 1;
+
+ if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
+ x1 = x;
+ y1 = ctx->DrawBuffer->Height - (y + h);
+ x2 = x + w - 1;
+ y2 = y1 + h - 1;
+ } else {
+ x1 = x;
+ y1 = y;
+ x2 = x + w - 1;
+ y2 = y + h - 1;
+
+ }
+
+ rmesa->state.scissor.rect.x1 = CLAMP(x1, min_x, max_x);
+ rmesa->state.scissor.rect.y1 = CLAMP(y1, min_y, max_y);
+ rmesa->state.scissor.rect.x2 = CLAMP(x2, min_x, max_x);
+ rmesa->state.scissor.rect.y2 = CLAMP(y2, min_y, max_y);
+
+ if (rmesa->vtbl.update_scissor)
+ rmesa->vtbl.update_scissor(ctx);
+}
+
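+/* Worked example (illustrative): for a 100-pixel-tall window-system
+ * drawable, glScissor(10, 20, 30, 40) gives
+ *
+ *   x1 = 10, y1 = 100 - (20 + 40) = 40,
+ *   x2 = 10 + 30 - 1 = 39, y2 = 40 + 40 - 1 = 79,
+ *
+ * i.e. GL's bottom-left origin flipped to the hardware's top-left one;
+ * FBO rendering keeps the coordinates unflipped.
+ */
+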
+/* =============================================================
+ * Scissoring
+ */
+
+void radeonScissor(struct gl_context* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
+{
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+ if (ctx->Scissor.Enabled) {
+ /* We don't pipeline cliprect changes */
+ radeon_firevertices(radeon);
+ radeonUpdateScissor(ctx);
+ }
+}
+
+/* ================================================================
+ * SwapBuffers with client-side throttling
+ */
+
+uint32_t radeonGetAge(radeonContextPtr radeon)
+{
+ drm_radeon_getparam_t gp;
+ int ret;
+ uint32_t age;
+
+ gp.param = RADEON_PARAM_LAST_CLEAR;
+ gp.value = (int *)&age;
+ ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
+ &gp, sizeof(gp));
+ if (ret) {
+ fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
+ ret);
+ exit(1);
+ }
+
+ return age;
+}
+
+/**
+ * Check if we're about to draw into the front color buffer.
+ * If so, set the intel->front_buffer_dirty field to true.
+ */
+void
+radeon_check_front_buffer_rendering(struct gl_context *ctx)
+{
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+ const struct gl_framebuffer *fb = ctx->DrawBuffer;
+
+ if (fb->Name == 0) {
+ /* drawing to window system buffer */
+ if (fb->_NumColorDrawBuffers > 0) {
+ if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
+ radeon->front_buffer_dirty = GL_TRUE;
+ }
+ }
+ }
+}
+
+
+void radeon_draw_buffer(struct gl_context *ctx, struct gl_framebuffer *fb)
+{
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+ struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
+ *rrbColor = NULL;
+ uint32_t offset = 0;
+
+
+ if (!fb) {
+ /* this can happen during the initial context initialization */
+ return;
+ }
+
+ /* radeons only handle 1 color draw so far */
+ if (fb->_NumColorDrawBuffers != 1) {
+ radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
+ return;
+ }
+
+	/* Do this here, not core Mesa, since this function is called from
+ * many places within the driver.
+ */
+ if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
+ /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
+ _mesa_update_framebuffer(ctx);
+ /* this updates the DrawBuffer's Width/Height if it's a FBO */
+ _mesa_update_draw_buffer_bounds(ctx);
+ }
+
+ if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
+ /* this may occur when we're called by glBindFrameBuffer() during
+ * the process of someone setting up renderbuffers, etc.
+ */
+ /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
+ return;
+ }
+
+ if (fb->Name)
+ ;/* do something depthy/stencily TODO */
+
+
+ /* none */
+ if (fb->Name == 0) {
+ if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
+ rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
+ radeon->front_cliprects = GL_TRUE;
+ } else {
+ rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
+ radeon->front_cliprects = GL_FALSE;
+ }
+ } else {
+ /* user FBO in theory */
+ struct radeon_renderbuffer *rrb;
+ rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
+ if (rrb) {
+ offset = rrb->draw_offset;
+ rrbColor = rrb;
+ }
+ }
+
+ if (rrbColor == NULL)
+ radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
+ else
+ radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
+
+
+ if (fb->Attachment[BUFFER_DEPTH].Renderbuffer) {
+ rrbDepth = radeon_renderbuffer(fb->Attachment[BUFFER_DEPTH].Renderbuffer);
+ if (rrbDepth && rrbDepth->bo) {
+ radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
+ } else {
+ radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
+ }
+ } else {
+ radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
+ rrbDepth = NULL;
+ }
+
+ if (fb->Attachment[BUFFER_STENCIL].Renderbuffer) {
+ rrbStencil = radeon_renderbuffer(fb->Attachment[BUFFER_STENCIL].Renderbuffer);
+ if (rrbStencil && rrbStencil->bo) {
+ radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
+ /* need to re-compute stencil hw state */
+ if (!rrbDepth)
+ rrbDepth = rrbStencil;
+ } else {
+ radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
+ }
+ } else {
+ radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
+ if (ctx->Driver.Enable != NULL)
+ ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
+ else
+ ctx->NewState |= _NEW_STENCIL;
+ }
+
+ /* Update culling direction which changes depending on the
+ * orientation of the buffer:
+ */
+ if (ctx->Driver.FrontFace)
+ ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
+ else
+ ctx->NewState |= _NEW_POLYGON;
+
+ /*
+ * Update depth test state
+ */
+ if (ctx->Driver.Enable) {
+ ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
+ (ctx->Depth.Test && fb->Visual.depthBits > 0));
+ /* Need to update the derived ctx->Stencil._Enabled first */
+ ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
+ (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
+ } else {
+ ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
+ }
+
+ _mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base.Base);
+ _mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base.Base);
+ radeon->state.color.draw_offset = offset;
+
+#if 0
+ /* update viewport since it depends on window size */
+ if (ctx->Driver.Viewport) {
+ ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
+ ctx->Viewport.Width, ctx->Viewport.Height);
+ } else {
+
+ }
+#endif
+ ctx->NewState |= _NEW_VIEWPORT;
+
+ /* Set state we know depends on drawable parameters:
+ */
+ radeonUpdateScissor(ctx);
+ radeon->NewGLState |= _NEW_SCISSOR;
+
+ if (ctx->Driver.DepthRange)
+ ctx->Driver.DepthRange(ctx,
+ ctx->Viewport.Near,
+ ctx->Viewport.Far);
+
+ /* Update culling direction which changes depending on the
+ * orientation of the buffer:
+ */
+ if (ctx->Driver.FrontFace)
+ ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
+ else
+ ctx->NewState |= _NEW_POLYGON;
+}
+
+/**
+ * Called via glDrawBuffer.
+ */
+void radeonDrawBuffer( struct gl_context *ctx, GLenum mode )
+{
+ if (RADEON_DEBUG & RADEON_DRI)
+ fprintf(stderr, "%s %s\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr( mode ));
+
+ if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+
+ const GLboolean was_front_buffer_rendering =
+ radeon->is_front_buffer_rendering;
+
+ radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
+ (mode == GL_FRONT);
+
+ /* If we weren't front-buffer rendering before but we are now, make sure
+ * that the front-buffer has actually been allocated.
+ */
+ if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
+ radeon_update_renderbuffers(radeon->dri.context,
+ radeon->dri.context->driDrawablePriv, GL_FALSE);
+ }
+ }
+
+ radeon_draw_buffer(ctx, ctx->DrawBuffer);
+}
+
+void radeonReadBuffer( struct gl_context *ctx, GLenum mode )
+{
+ if (ctx->DrawBuffer && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
+ struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
+ const GLboolean was_front_buffer_reading = rmesa->is_front_buffer_reading;
+ rmesa->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
+ || (mode == GL_FRONT);
+
+ if (!was_front_buffer_reading && rmesa->is_front_buffer_reading) {
+ radeon_update_renderbuffers(rmesa->dri.context,
+ rmesa->dri.context->driReadablePriv, GL_FALSE);
+ }
+ }
+ /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
+ if (ctx->ReadBuffer == ctx->DrawBuffer) {
+ /* This will update FBO completeness status.
+ * A framebuffer will be incomplete if the GL_READ_BUFFER setting
+ * refers to a missing renderbuffer. Calling glReadBuffer can set
+ * that straight and can make the drawing buffer complete.
+ */
+ radeon_draw_buffer(ctx, ctx->DrawBuffer);
+ }
+}
+
+void radeon_window_moved(radeonContextPtr radeon)
+{
+	/* Cliprects have to be updated before doing anything else */
+ radeonSetCliprects(radeon);
+}
+
+void radeon_viewport(struct gl_context *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
+{
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+ __DRIcontext *driContext = radeon->dri.context;
+ void (*old_viewport)(struct gl_context *ctx, GLint x, GLint y,
+ GLsizei w, GLsizei h);
+
+ if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
+ if (radeon->is_front_buffer_rendering) {
+ ctx->Driver.Flush(ctx);
+ }
+ radeon_update_renderbuffers(driContext, driContext->driDrawablePriv, GL_FALSE);
+ if (driContext->driDrawablePriv != driContext->driReadablePriv)
+ radeon_update_renderbuffers(driContext, driContext->driReadablePriv, GL_FALSE);
+ }
+
+ old_viewport = ctx->Driver.Viewport;
+ ctx->Driver.Viewport = NULL;
+ radeon_window_moved(radeon);
+ radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
+ ctx->Driver.Viewport = old_viewport;
+}
+
+static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
+{
+ int i, j, reg, count;
+ int dwords;
+ uint32_t packet0;
+ if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE) )
+ return;
+
+ dwords = (*state->check) (radeon->glCtx, state);
+
+ fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);
+
+ if (state->cmd && radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
+ if (dwords > state->cmd_size)
+ dwords = state->cmd_size;
+ for (i = 0; i < dwords;) {
+ packet0 = state->cmd[i];
+ reg = (packet0 & 0x1FFF) << 2;
+ count = ((packet0 & 0x3FFF0000) >> 16) + 1;
+ fprintf(stderr, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
+ state->name, i, reg, count);
+ ++i;
+ for (j = 0; j < count && i < dwords; j++) {
+ fprintf(stderr, " %s[%d]: 0x%04x = %08x\n",
+ state->name, i, reg, state->cmd[i]);
+ reg += 4;
+ ++i;
+ }
+ }
+ }
+}
+
+/**
+ * Count total size for next state emit.
+ **/
+GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
+{
+ struct radeon_state_atom *atom;
+ GLuint dwords = 0;
+ /* check if we are going to emit full state */
+
+ if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
+ if (!radeon->hw.is_dirty)
+ goto out;
+ foreach(atom, &radeon->hw.atomlist) {
+ if (atom->dirty) {
+ const GLuint atom_size = atom->check(radeon->glCtx, atom);
+ dwords += atom_size;
+ if (RADEON_CMDBUF && atom_size) {
+ radeon_print_state_atom(radeon, atom);
+ }
+ }
+ }
+ } else {
+ foreach(atom, &radeon->hw.atomlist) {
+ const GLuint atom_size = atom->check(radeon->glCtx, atom);
+ dwords += atom_size;
+ if (RADEON_CMDBUF && atom_size) {
+ radeon_print_state_atom(radeon, atom);
+ }
+
+ }
+ }
+out:
+ radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %u\n", __func__, dwords);
+ return dwords;
+}
+
+static INLINE void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
+{
+ BATCH_LOCALS(radeon);
+ int dwords;
+
+ dwords = (*atom->check) (radeon->glCtx, atom);
+ if (dwords) {
+
+ radeon_print_state_atom(radeon, atom);
+
+ if (atom->emit) {
+ (*atom->emit)(radeon->glCtx, atom);
+ } else {
+ BEGIN_BATCH_NO_AUTOSTATE(dwords);
+ OUT_BATCH_TABLE(atom->cmd, dwords);
+ END_BATCH();
+ }
+ atom->dirty = GL_FALSE;
+
+ } else {
+ radeon_print(RADEON_STATE, RADEON_VERBOSE, " skip state %s\n", atom->name);
+ }
+
+}
+
+static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
+{
+ struct radeon_state_atom *atom;
+
+ if (radeon->vtbl.pre_emit_atoms)
+ radeon->vtbl.pre_emit_atoms(radeon);
+
+ /* Emit actual atoms */
+ if (radeon->hw.all_dirty || emitAll) {
+ foreach(atom, &radeon->hw.atomlist)
+ radeon_emit_atom( radeon, atom );
+ } else {
+ foreach(atom, &radeon->hw.atomlist) {
+ if ( atom->dirty )
+ radeon_emit_atom( radeon, atom );
+ }
+ }
+
+ COMMIT_BATCH();
+}
+
+static GLboolean radeon_revalidate_bos(struct gl_context *ctx)
+{
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+ int ret;
+
+ ret = radeon_cs_space_check(radeon->cmdbuf.cs);
+ if (ret == RADEON_CS_SPACE_FLUSH)
+ return GL_FALSE;
+ return GL_TRUE;
+}
+
+void radeonEmitState(radeonContextPtr radeon)
+{
+ radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __FUNCTION__);
+
+ if (radeon->vtbl.pre_emit_state)
+ radeon->vtbl.pre_emit_state(radeon);
+
+ /* this code used to return here but now it emits zbs */
+ if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
+ return;
+
+ if (!radeon->cmdbuf.cs->cdw) {
+ if (RADEON_DEBUG & RADEON_STATE)
+ fprintf(stderr, "Begin reemit state\n");
+
+ radeonEmitAtoms(radeon, GL_TRUE);
+ } else {
+
+ if (RADEON_DEBUG & RADEON_STATE)
+ fprintf(stderr, "Begin dirty state\n");
+
+ radeonEmitAtoms(radeon, GL_FALSE);
+ }
+
+ radeon->hw.is_dirty = GL_FALSE;
+ radeon->hw.all_dirty = GL_FALSE;
+}
+
+
+void radeonFlush(struct gl_context *ctx)
+{
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+ if (RADEON_DEBUG & RADEON_IOCTL)
+ fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);
+
+	/* If there are no commands in the buffer, no pending DMA flush, and
+	   no DMA buffers reserved, there is nothing to flush at all.
+	*/
+ if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
+ goto flush_front;
+
+ if (radeon->dma.flush)
+ radeon->dma.flush( ctx );
+
+ if (radeon->cmdbuf.cs->cdw)
+ rcommonFlushCmdBuf(radeon, __FUNCTION__);
+
+flush_front:
+ if (_mesa_is_winsys_fbo(ctx->DrawBuffer) && radeon->front_buffer_dirty) {
+ __DRIscreen *const screen = radeon->radeonScreen->driScreen;
+
+ if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
+ && (screen->dri2.loader->flushFrontBuffer != NULL)) {
+ __DRIdrawable * drawable = radeon_get_drawable(radeon);
+
+ /* We set the dirty bit in radeon_prepare_render() if we're
+ * front buffer rendering once we get there.
+ */
+ radeon->front_buffer_dirty = GL_FALSE;
+
+ (*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);
+ }
+ }
+}
+
+/* Make sure all commands have been sent to the hardware and have
+ * completed processing.
+ */
+void radeonFinish(struct gl_context * ctx)
+{
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
+ struct radeon_renderbuffer *rrb;
+ int i;
+
+ if (ctx->Driver.Flush)
+ ctx->Driver.Flush(ctx); /* +r6/r7 */
+
+ for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
+ struct radeon_renderbuffer *rrb;
+ rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
+ if (rrb && rrb->bo)
+ radeon_bo_wait(rrb->bo);
+ }
+ rrb = radeon_get_depthbuffer(radeon);
+ if (rrb && rrb->bo)
+ radeon_bo_wait(rrb->bo);
+}
+
+/* cmdbuffer */
+/**
+ * Send the current command buffer via ioctl to the hardware.
+ */
+int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
+{
+ int ret = 0;
+
+ if (rmesa->cmdbuf.flushing) {
+ fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
+ exit(-1);
+ }
+ rmesa->cmdbuf.flushing = 1;
+
+ if (RADEON_DEBUG & RADEON_IOCTL) {
+ fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);
+ }
+
+ radeonEmitQueryEnd(rmesa->glCtx);
+
+ if (rmesa->cmdbuf.cs->cdw) {
+ ret = radeon_cs_emit(rmesa->cmdbuf.cs);
+ rmesa->hw.all_dirty = GL_TRUE;
+ }
+ radeon_cs_erase(rmesa->cmdbuf.cs);
+ rmesa->cmdbuf.flushing = 0;
+
+ if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
+ fprintf(stderr,"failed to revalidate buffers\n");
+ }
+
+ return ret;
+}
+
+int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
+{
+ int ret;
+
+ radeonReleaseDmaRegions(rmesa);
+
+ ret = rcommonFlushCmdBufLocked(rmesa, caller);
+
+ if (ret) {
+ fprintf(stderr, "drmRadeonCmdBuffer: %d. Kernel failed to "
+ "parse or rejected command stream. See dmesg "
+ "for more info.\n", ret);
+ exit(ret);
+ }
+
+ return ret;
+}
+
+/**
+ * Make sure that enough space is available in the command buffer
+ * by flushing if necessary.
+ *
+ * \param dwords The number of dwords that need to be free in the command buffer
+ */
+GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
+{
+ if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
+ || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
+ /* If the buffer is empty and we still need to flush, the rendering operation is too big for the command buffer. */
+ assert(rmesa->cmdbuf.cs->cdw);
+ rcommonFlushCmdBuf(rmesa, caller);
+ return GL_TRUE;
+ }
+ return GL_FALSE;
+}
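+/* Usage sketch (hypothetical caller, not part of this file): an emit path
+ * typically reserves its worst-case dword count up front so that any flush
+ * happens at a safe point, e.g.
+ *
+ *   if (rcommonEnsureCmdBufSpace(rmesa, state_size, __FUNCTION__))
+ *       radeonEmitState(rmesa);
+ *
+ * since a GL_TRUE return means the buffer was flushed and, as
+ * rcommonFlushCmdBufLocked() marks all state dirty, the cached state
+ * must be re-emitted.
+ */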
+
+void rcommonInitCmdBuf(radeonContextPtr rmesa)
+{
+ GLuint size;
+ struct drm_radeon_gem_info mminfo = { 0 };
+
+ /* Initialize command buffer */
+ size = 256 * driQueryOptioni(&rmesa->optionCache,
+ "command_buffer_size");
+ if (size < 2 * rmesa->hw.max_state_size) {
+ size = 2 * rmesa->hw.max_state_size + 65535;
+ }
+ if (size > 64 * 256)
+ size = 64 * 256;
+
+ radeon_print(RADEON_CS, RADEON_VERBOSE,
+ "sizeof(drm_r300_cmd_header_t)=%zu\n", sizeof(drm_r300_cmd_header_t));
+ radeon_print(RADEON_CS, RADEON_VERBOSE,
+ "sizeof(drm_radeon_cmd_buffer_t)=%zu\n", sizeof(drm_radeon_cmd_buffer_t));
+ radeon_print(RADEON_CS, RADEON_VERBOSE,
+ "Allocating %d bytes command buffer (max state is %d bytes)\n",
+ size * 4, rmesa->hw.max_state_size * 4);
+
+ rmesa->cmdbuf.csm =
+ radeon_cs_manager_gem_ctor(rmesa->radeonScreen->driScreen->fd);
+ if (rmesa->cmdbuf.csm == NULL) {
+ /* FIXME: fatal error */
+ return;
+ }
+ rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
+ assert(rmesa->cmdbuf.cs != NULL);
+ rmesa->cmdbuf.size = size;
+
+ radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
+ (void (*)(void *))rmesa->glCtx->Driver.Flush, rmesa->glCtx);
+
+
+ if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO,
+ &mminfo, sizeof(mminfo))) {
+ radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM,
+ mminfo.vram_visible);
+ radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT,
+ mminfo.gart_size);
+ }
+}
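+/* Sizing, worked through (option value hypothetical): with a driconf
+ * command_buffer_size of 8, the initial size is 256 * 8 = 2048 dwords; if
+ * that is smaller than twice the maximum state size, the buffer grows to
+ * 2 * max_state_size + 65535; either way it is clamped to 64 * 256 =
+ * 16384 dwords. Sizes are in dwords, hence the "size * 4" bytes reported
+ * in the debug print above.
+ */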
+
+/**
+ * Destroy the command buffer
+ */
+void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
+{
+ radeon_cs_destroy(rmesa->cmdbuf.cs);
+ radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
+}
+
+void rcommonBeginBatch(radeonContextPtr rmesa, int n,
+ int dostate,
+ const char *file,
+ const char *function,
+ int line)
+{
+ radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
+
+ radeon_print(RADEON_CS, RADEON_VERBOSE, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
+ n, rmesa->cmdbuf.cs->cdw, function, line);
+
+}
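+/* This is the target of the BEGIN_BATCH() convenience macro from
+ * radeon_cmdbuf.h (not part of this diff); the usual pattern is assumed
+ * to look like:
+ *
+ *   BEGIN_BATCH(2);
+ *   OUT_BATCH(packet_header);
+ *   OUT_BATCH(value);
+ *   END_BATCH();
+ *
+ * with the dword count passed to BEGIN_BATCH() covering every OUT_BATCH()
+ * that follows.
+ */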
+
+void radeonUserClear(struct gl_context *ctx, GLuint mask)
+{
+ _mesa_meta_Clear(ctx, mask);
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_common.h b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_common.h
index 5bcb696a9f7..636822faed2 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_common.h
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_common.h
@@ -1 +1,88 @@
-../radeon/radeon_common.h \ No newline at end of file
+#ifndef COMMON_MISC_H
+#define COMMON_MISC_H
+
+#include "radeon_common_context.h"
+#include "radeon_dma.h"
+#include "radeon_texture.h"
+
+void radeonUserClear(struct gl_context *ctx, GLuint mask);
+void radeonSetCliprects(radeonContextPtr radeon);
+void radeonUpdateScissor( struct gl_context *ctx );
+void radeonScissor(struct gl_context* ctx, GLint x, GLint y, GLsizei w, GLsizei h);
+
+extern uint32_t radeonGetAge(radeonContextPtr radeon);
+
+void radeonFlush(struct gl_context *ctx);
+void radeonFinish(struct gl_context * ctx);
+void radeonEmitState(radeonContextPtr radeon);
+GLuint radeonCountStateEmitSize(radeonContextPtr radeon);
+
+void radeon_clear_tris(struct gl_context *ctx, GLbitfield mask);
+
+void radeon_window_moved(radeonContextPtr radeon);
+void radeon_draw_buffer(struct gl_context *ctx, struct gl_framebuffer *fb);
+void radeonDrawBuffer( struct gl_context *ctx, GLenum mode );
+void radeonReadBuffer( struct gl_context *ctx, GLenum mode );
+void radeon_viewport(struct gl_context *ctx, GLint x, GLint y, GLsizei width, GLsizei height);
+void radeon_fbo_init(struct radeon_context *radeon);
+void
+radeon_renderbuffer_set_bo(struct radeon_renderbuffer *rb,
+ struct radeon_bo *bo);
+struct radeon_renderbuffer *
+radeon_create_renderbuffer(gl_format format, __DRIdrawable *driDrawPriv);
+
+void
+radeonReadPixels(struct gl_context * ctx,
+ GLint x, GLint y, GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const struct gl_pixelstore_attrib *pack, GLvoid * pixels);
+
+void radeon_check_front_buffer_rendering(struct gl_context *ctx);
+static inline struct radeon_renderbuffer *radeon_renderbuffer(struct gl_renderbuffer *rb)
+{
+ struct radeon_renderbuffer *rrb = (struct radeon_renderbuffer *)rb;
+ radeon_print(RADEON_MEMORY, RADEON_TRACE,
+ "%s(rb %p)\n",
+ __func__, (void *) rb);
+ if (rrb && rrb->base.Base.ClassID == RADEON_RB_CLASS)
+ return rrb;
+ else
+ return NULL;
+}
+
+static inline struct radeon_renderbuffer *radeon_get_renderbuffer(struct gl_framebuffer *fb, int att_index)
+{
+ radeon_print(RADEON_MEMORY, RADEON_TRACE,
+ "%s(fb %p, index %d)\n",
+ __func__, (void *) fb, att_index);
+
+ if (att_index >= 0)
+ return radeon_renderbuffer(fb->Attachment[att_index].Renderbuffer);
+ else
+ return NULL;
+}
+
+static inline struct radeon_renderbuffer *radeon_get_depthbuffer(radeonContextPtr rmesa)
+{
+ struct radeon_renderbuffer *rrb;
+ rrb = radeon_renderbuffer(rmesa->state.depth.rb);
+ if (!rrb)
+ return NULL;
+
+ return rrb;
+}
+
+static inline struct radeon_renderbuffer *radeon_get_colorbuffer(radeonContextPtr rmesa)
+{
+ struct radeon_renderbuffer *rrb;
+
+ rrb = radeon_renderbuffer(rmesa->state.color.rb);
+ if (!rrb)
+ return NULL;
+ return rrb;
+}
+
+#include "radeon_cmdbuf.h"
+
+
+#endif
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_common_context.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_common_context.c
index 86800f3819c..8ea8925d8a2 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_common_context.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_common_context.c
@@ -1 +1,633 @@
-../radeon/radeon_common_context.c \ No newline at end of file
+/**************************************************************************
+
+Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
+ VA Linux Systems Inc., Fremont, California.
+Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
+
+The Weather Channel (TM) funded Tungsten Graphics to develop the
+initial release of the Radeon 8500 driver under the XFree86 license.
+This notice must be preserved.
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#include "radeon_common.h"
+#include "xmlpool.h" /* for symbolic values of enum-type options */
+#include "utils.h"
+#include "drivers/common/meta.h"
+#include "main/context.h"
+#include "main/framebuffer.h"
+#include "main/fbobject.h"
+#include "main/renderbuffer.h"
+#include "main/state.h"
+#include "main/simple_list.h"
+#include "swrast/swrast.h"
+#include "swrast_setup/swrast_setup.h"
+#include "tnl/tnl.h"
+
+#ifndef RADEON_DEBUG
+int RADEON_DEBUG = (0);
+#endif
+
+
+static const char* get_chip_family_name(int chip_family)
+{
+ switch(chip_family) {
+#if defined(RADEON_R100)
+ case CHIP_FAMILY_R100: return "R100";
+ case CHIP_FAMILY_RV100: return "RV100";
+ case CHIP_FAMILY_RS100: return "RS100";
+ case CHIP_FAMILY_RV200: return "RV200";
+ case CHIP_FAMILY_RS200: return "RS200";
+#elif defined(RADEON_R200)
+ case CHIP_FAMILY_R200: return "R200";
+ case CHIP_FAMILY_RV250: return "RV250";
+ case CHIP_FAMILY_RS300: return "RS300";
+ case CHIP_FAMILY_RV280: return "RV280";
+#endif
+ default: return "unknown";
+ }
+}
+
+
+/* Return various strings for glGetString().
+ */
+static const GLubyte *radeonGetString(struct gl_context * ctx, GLenum name)
+{
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+ static char buffer[128];
+
+ switch (name) {
+ case GL_VENDOR:
+ return (GLubyte *) "Tungsten Graphics, Inc.";
+
+ case GL_RENDERER:
+ {
+ unsigned offset;
+ GLuint agp_mode = (radeon->radeonScreen->card_type==RADEON_CARD_PCI) ? 0 :
+ radeon->radeonScreen->AGPMode;
+ char hardwarename[32];
+
+ sprintf(hardwarename, "%s (%s %04X)",
+#if defined(RADEON_R100)
+ "R100",
+#elif defined(RADEON_R200)
+ "R200",
+#endif
+ get_chip_family_name(radeon->radeonScreen->chip_family),
+ radeon->radeonScreen->device_id);
+
+ offset = driGetRendererString(buffer, hardwarename, agp_mode);
+
+ sprintf(&buffer[offset], " %sTCL",
+ !(radeon->TclFallback & RADEON_TCL_FALLBACK_TCL_DISABLE)
+ ? "" : "NO-");
+
+ strcat(buffer, " DRI2");
+
+ return (GLubyte *) buffer;
+ }
+
+ default:
+ return NULL;
+ }
+}
+
+/* Initialize the driver's misc functions.
+ */
+static void radeonInitDriverFuncs(struct dd_function_table *functions)
+{
+ functions->GetString = radeonGetString;
+}
+
+/**
+ * Create and initialize all common fields of the context,
+ * including the Mesa context itself.
+ */
+GLboolean radeonInitContext(radeonContextPtr radeon,
+ struct dd_function_table* functions,
+ const struct gl_config * glVisual,
+ __DRIcontext * driContextPriv,
+ void *sharedContextPrivate)
+{
+ __DRIscreen *sPriv = driContextPriv->driScreenPriv;
+ radeonScreenPtr screen = (radeonScreenPtr) (sPriv->driverPrivate);
+ struct gl_context* ctx;
+ struct gl_context* shareCtx;
+ int fthrottle_mode;
+
+ /* Fill in additional standard functions. */
+ radeonInitDriverFuncs(functions);
+
+ radeon->radeonScreen = screen;
+ /* Allocate and initialize the Mesa context */
+ if (sharedContextPrivate)
+ shareCtx = ((radeonContextPtr)sharedContextPrivate)->glCtx;
+ else
+ shareCtx = NULL;
+ radeon->glCtx = _mesa_create_context(API_OPENGL, glVisual, shareCtx,
+ functions, (void *)radeon);
+ if (!radeon->glCtx)
+ return GL_FALSE;
+
+ ctx = radeon->glCtx;
+ driContextPriv->driverPrivate = radeon;
+
+ _mesa_meta_init(ctx);
+
+ /* DRI fields */
+ radeon->dri.context = driContextPriv;
+ radeon->dri.screen = sPriv;
+ radeon->dri.fd = sPriv->fd;
+ radeon->dri.drmMinor = sPriv->drm_version.minor;
+
+ /* Setup IRQs */
+ fthrottle_mode = driQueryOptioni(&radeon->optionCache, "fthrottle_mode");
+ radeon->iw.irq_seq = -1;
+ radeon->irqsEmitted = 0;
+ radeon->do_irqs = (fthrottle_mode == DRI_CONF_FTHROTTLE_IRQS &&
+ radeon->radeonScreen->irq);
+
+ radeon->do_usleeps = (fthrottle_mode == DRI_CONF_FTHROTTLE_USLEEPS);
+
+ if (!radeon->do_irqs)
+ fprintf(stderr,
+ "IRQs not enabled, falling back to %s: %d %d\n",
+ radeon->do_usleeps ? "usleeps" : "busy waits",
+ fthrottle_mode, radeon->radeonScreen->irq);
+
+ radeon->texture_depth = driQueryOptioni (&radeon->optionCache,
+ "texture_depth");
+ if (radeon->texture_depth == DRI_CONF_TEXTURE_DEPTH_FB)
+ radeon->texture_depth = ( glVisual->rgbBits > 16 ) ?
+ DRI_CONF_TEXTURE_DEPTH_32 : DRI_CONF_TEXTURE_DEPTH_16;
+
+ radeon->texture_row_align = 32;
+ radeon->texture_rect_row_align = 64;
+ radeon->texture_compressed_row_align = 32;
+
+ radeon_init_dma(radeon);
+
+ return GL_TRUE;
+}
+
+
+
+/**
+ * Destroy the command buffer and state atoms.
+ */
+static void radeon_destroy_atom_list(radeonContextPtr radeon)
+{
+ struct radeon_state_atom *atom;
+
+ foreach(atom, &radeon->hw.atomlist) {
+ FREE(atom->cmd);
+ if (atom->lastcmd)
+ FREE(atom->lastcmd);
+ }
+
+}
+
+/**
+ * Cleanup common context fields.
+ * Called by r200DestroyContext
+ */
+void radeonDestroyContext(__DRIcontext *driContextPriv )
+{
+#ifdef RADEON_BO_TRACK
+ FILE *track;
+#endif
+ GET_CURRENT_CONTEXT(ctx);
+ radeonContextPtr radeon = (radeonContextPtr) driContextPriv->driverPrivate;
+ radeonContextPtr current = ctx ? RADEON_CONTEXT(ctx) : NULL;
+
+ assert(radeon);
+
+ _mesa_meta_free(radeon->glCtx);
+
+ if (radeon == current) {
+ _mesa_make_current(NULL, NULL, NULL);
+ }
+
+ radeon_firevertices(radeon);
+ if (!is_empty_list(&radeon->dma.reserved)) {
+ rcommonFlushCmdBuf( radeon, __FUNCTION__ );
+ }
+
+ radeonFreeDmaRegions(radeon);
+ radeonReleaseArrays(radeon->glCtx, ~0);
+ if (radeon->vtbl.free_context)
+ radeon->vtbl.free_context(radeon->glCtx);
+ _swsetup_DestroyContext( radeon->glCtx );
+ _tnl_DestroyContext( radeon->glCtx );
+ _vbo_DestroyContext( radeon->glCtx );
+ _swrast_DestroyContext( radeon->glCtx );
+
+ /* free atom list */
+ /* free the Mesa context */
+ _mesa_destroy_context(radeon->glCtx);
+
+ /* _mesa_destroy_context() might result in calls to functions that
+ * depend on the DriverCtx, so don't set it to NULL before.
+ *
+ * radeon->glCtx->DriverCtx = NULL;
+ */
+ /* free the option cache */
+ driDestroyOptionCache(&radeon->optionCache);
+
+ rcommonDestroyCmdBuf(radeon);
+
+ radeon_destroy_atom_list(radeon);
+
+#ifdef RADEON_BO_TRACK
+ track = fopen("/tmp/tracklog", "w");
+ if (track) {
+ radeon_tracker_print(&radeon->radeonScreen->bom->tracker, track);
+ fclose(track);
+ }
+#endif
+ FREE(radeon);
+}
+
+/* Force the context `c' to be unbound from its buffer.
+ */
+GLboolean radeonUnbindContext(__DRIcontext * driContextPriv)
+{
+ radeonContextPtr radeon = (radeonContextPtr) driContextPriv->driverPrivate;
+
+ if (RADEON_DEBUG & RADEON_DRI)
+ fprintf(stderr, "%s ctx %p\n", __FUNCTION__,
+ radeon->glCtx);
+
+ /* Unset current context and dispatch table */
+ _mesa_make_current(NULL, NULL, NULL);
+
+ return GL_TRUE;
+}
+
+
+static unsigned
+radeon_bits_per_pixel(const struct radeon_renderbuffer *rb)
+{
+ return _mesa_get_format_bytes(rb->base.Base.Format) * 8;
+}
+
+/*
+ * Check if drawable has been invalidated by dri2InvalidateDrawable().
+ * Update renderbuffers if so. This prevents a client from accessing
+ * a backbuffer that has a swap pending but not yet completed.
+ *
+ * See intel_prepare_render for equivalent code in intel driver.
+ *
+ */
+void radeon_prepare_render(radeonContextPtr radeon)
+{
+ __DRIcontext *driContext = radeon->dri.context;
+ __DRIdrawable *drawable;
+ __DRIscreen *screen;
+
+ screen = driContext->driScreenPriv;
+ if (!screen->dri2.loader)
+ return;
+
+ drawable = driContext->driDrawablePriv;
+ if (drawable->dri2.stamp != driContext->dri2.draw_stamp) {
+ if (drawable->lastStamp != drawable->dri2.stamp)
+ radeon_update_renderbuffers(driContext, drawable, GL_FALSE);
+
+ /* The Intel driver does the equivalent of this; no clue if it is needed: */
+ radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
+
+ driContext->dri2.draw_stamp = drawable->dri2.stamp;
+ }
+
+ drawable = driContext->driReadablePriv;
+ if (drawable->dri2.stamp != driContext->dri2.read_stamp) {
+ if (drawable->lastStamp != drawable->dri2.stamp)
+ radeon_update_renderbuffers(driContext, drawable, GL_FALSE);
+ driContext->dri2.read_stamp = drawable->dri2.stamp;
+ }
+
+ /* If we're currently rendering to the front buffer, the rendering
+ * that will happen next will probably dirty the front buffer. So
+ * mark it as dirty here.
+ */
+ if (radeon->is_front_buffer_rendering)
+ radeon->front_buffer_dirty = GL_TRUE;
+}
+
+void
+radeon_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable,
+ GLboolean front_only)
+{
+ unsigned int attachments[10];
+ __DRIbuffer *buffers = NULL;
+ __DRIscreen *screen;
+ struct radeon_renderbuffer *rb;
+ int i, count;
+ struct radeon_framebuffer *draw;
+ radeonContextPtr radeon;
+ char *regname;
+ struct radeon_bo *depth_bo = NULL, *bo;
+
+ if (RADEON_DEBUG & RADEON_DRI)
+ fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
+
+ draw = drawable->driverPrivate;
+ screen = context->driScreenPriv;
+ radeon = (radeonContextPtr) context->driverPrivate;
+
+ /* Set this up front, so that in case our buffers get invalidated
+ * while we're getting new buffers, we don't clobber the stamp and
+ * thus ignore the invalidate. */
+ drawable->lastStamp = drawable->dri2.stamp;
+
+ if (screen->dri2.loader
+ && (screen->dri2.loader->base.version > 2)
+ && (screen->dri2.loader->getBuffersWithFormat != NULL)) {
+ struct radeon_renderbuffer *depth_rb;
+ struct radeon_renderbuffer *stencil_rb;
+
+ i = 0;
+ if ((front_only || radeon->is_front_buffer_rendering ||
+ radeon->is_front_buffer_reading ||
+ !draw->color_rb[1])
+ && draw->color_rb[0]) {
+ attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
+ attachments[i++] = radeon_bits_per_pixel(draw->color_rb[0]);
+ }
+
+ if (!front_only) {
+ if (draw->color_rb[1]) {
+ attachments[i++] = __DRI_BUFFER_BACK_LEFT;
+ attachments[i++] = radeon_bits_per_pixel(draw->color_rb[1]);
+ }
+
+ depth_rb = radeon_get_renderbuffer(&draw->base, BUFFER_DEPTH);
+ stencil_rb = radeon_get_renderbuffer(&draw->base, BUFFER_STENCIL);
+
+ if ((depth_rb != NULL) && (stencil_rb != NULL)) {
+ attachments[i++] = __DRI_BUFFER_DEPTH_STENCIL;
+ attachments[i++] = radeon_bits_per_pixel(depth_rb);
+ } else if (depth_rb != NULL) {
+ attachments[i++] = __DRI_BUFFER_DEPTH;
+ attachments[i++] = radeon_bits_per_pixel(depth_rb);
+ } else if (stencil_rb != NULL) {
+ attachments[i++] = __DRI_BUFFER_STENCIL;
+ attachments[i++] = radeon_bits_per_pixel(stencil_rb);
+ }
+ }
+
+ buffers = (*screen->dri2.loader->getBuffersWithFormat)(drawable,
+ &drawable->w,
+ &drawable->h,
+ attachments, i / 2,
+ &count,
+ drawable->loaderPrivate);
+ } else if (screen->dri2.loader) {
+ i = 0;
+ if (draw->color_rb[0])
+ attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
+ if (!front_only) {
+ if (draw->color_rb[1])
+ attachments[i++] = __DRI_BUFFER_BACK_LEFT;
+ if (radeon_get_renderbuffer(&draw->base, BUFFER_DEPTH))
+ attachments[i++] = __DRI_BUFFER_DEPTH;
+ if (radeon_get_renderbuffer(&draw->base, BUFFER_STENCIL))
+ attachments[i++] = __DRI_BUFFER_STENCIL;
+ }
+
+ buffers = (*screen->dri2.loader->getBuffers)(drawable,
+ &drawable->w,
+ &drawable->h,
+ attachments, i,
+ &count,
+ drawable->loaderPrivate);
+ }
+
+ if (buffers == NULL)
+ return;
+
+ for (i = 0; i < count; i++) {
+ switch (buffers[i].attachment) {
+ case __DRI_BUFFER_FRONT_LEFT:
+ rb = draw->color_rb[0];
+ regname = "dri2 front buffer";
+ break;
+ case __DRI_BUFFER_FAKE_FRONT_LEFT:
+ rb = draw->color_rb[0];
+ regname = "dri2 fake front buffer";
+ break;
+ case __DRI_BUFFER_BACK_LEFT:
+ rb = draw->color_rb[1];
+ regname = "dri2 back buffer";
+ break;
+ case __DRI_BUFFER_DEPTH:
+ rb = radeon_get_renderbuffer(&draw->base, BUFFER_DEPTH);
+ regname = "dri2 depth buffer";
+ break;
+ case __DRI_BUFFER_DEPTH_STENCIL:
+ rb = radeon_get_renderbuffer(&draw->base, BUFFER_DEPTH);
+ regname = "dri2 depth / stencil buffer";
+ break;
+ case __DRI_BUFFER_STENCIL:
+ rb = radeon_get_renderbuffer(&draw->base, BUFFER_STENCIL);
+ regname = "dri2 stencil buffer";
+ break;
+ case __DRI_BUFFER_ACCUM:
+ default:
+ fprintf(stderr,
+ "unhandled buffer attach event, attachment type %d\n",
+ buffers[i].attachment);
+ return;
+ }
+
+ if (rb == NULL)
+ continue;
+
+ if (rb->bo) {
+ uint32_t name = radeon_gem_name_bo(rb->bo);
+ if (name == buffers[i].name)
+ continue;
+ }
+
+ if (RADEON_DEBUG & RADEON_DRI)
+ fprintf(stderr,
+ "attaching buffer %s, %d, at %d, cpp %d, pitch %d\n",
+ regname, buffers[i].name, buffers[i].attachment,
+ buffers[i].cpp, buffers[i].pitch);
+
+ rb->cpp = buffers[i].cpp;
+ rb->pitch = buffers[i].pitch;
+ rb->base.Base.Width = drawable->w;
+ rb->base.Base.Height = drawable->h;
+ rb->has_surface = 0;
+
+ if (buffers[i].attachment == __DRI_BUFFER_STENCIL && depth_bo) {
+ if (RADEON_DEBUG & RADEON_DRI)
+ fprintf(stderr, "(reusing depth buffer as stencil)\n");
+ bo = depth_bo;
+ radeon_bo_ref(bo);
+ } else {
+ uint32_t tiling_flags = 0, pitch = 0;
+ int ret;
+
+ bo = radeon_bo_open(radeon->radeonScreen->bom,
+ buffers[i].name,
+ 0,
+ 0,
+ RADEON_GEM_DOMAIN_VRAM,
+ buffers[i].flags);
+
+ if (bo == NULL) {
+ fprintf(stderr, "failed to attach %s %d\n",
+ regname, buffers[i].name);
+ continue;
+ }
+
+ ret = radeon_bo_get_tiling(bo, &tiling_flags, &pitch);
+ if (ret) {
+ fprintf(stderr,
+ "failed to get tiling for %s %d\n",
+ regname, buffers[i].name);
+ radeon_bo_unref(bo);
+ bo = NULL;
+ continue;
+ } else {
+ if (tiling_flags & RADEON_TILING_MACRO)
+ bo->flags |= RADEON_BO_FLAGS_MACRO_TILE;
+ if (tiling_flags & RADEON_TILING_MICRO)
+ bo->flags |= RADEON_BO_FLAGS_MICRO_TILE;
+ }
+ }
+
+ if (buffers[i].attachment == __DRI_BUFFER_DEPTH) {
+ if (draw->base.Visual.depthBits == 16)
+ rb->cpp = 2;
+ depth_bo = bo;
+ }
+
+ radeon_renderbuffer_set_bo(rb, bo);
+ radeon_bo_unref(bo);
+
+ if (buffers[i].attachment == __DRI_BUFFER_DEPTH_STENCIL) {
+ rb = radeon_get_renderbuffer(&draw->base, BUFFER_STENCIL);
+ if (rb != NULL) {
+ struct radeon_bo *stencil_bo = NULL;
+
+ if (rb->bo) {
+ uint32_t name = radeon_gem_name_bo(rb->bo);
+ if (name == buffers[i].name)
+ continue;
+ }
+
+ stencil_bo = bo;
+ radeon_bo_ref(stencil_bo);
+ radeon_renderbuffer_set_bo(rb, stencil_bo);
+ radeon_bo_unref(stencil_bo);
+ }
+ }
+ }
+
+ driUpdateFramebufferSize(radeon->glCtx, drawable);
+}
+
+/* Force the context `c' to be the current context and associate with it
+ * buffer `b'.
+ */
+GLboolean radeonMakeCurrent(__DRIcontext * driContextPriv,
+ __DRIdrawable * driDrawPriv,
+ __DRIdrawable * driReadPriv)
+{
+ radeonContextPtr radeon;
+ GET_CURRENT_CONTEXT(curCtx);
+ struct gl_framebuffer *drfb, *readfb;
+
+ if (driContextPriv)
+ radeon = (radeonContextPtr)driContextPriv->driverPrivate;
+ else
+ radeon = NULL;
+ /* According to the glXMakeCurrent() man page: "Pending commands to
+ * the previous context, if any, are flushed before it is released."
+ * But only flush if we're actually changing contexts.
+ */
+
+ if (curCtx && RADEON_CONTEXT(curCtx) != radeon) {
+ _mesa_flush(curCtx);
+ }
+
+ if (!driContextPriv) {
+ if (RADEON_DEBUG & RADEON_DRI)
+ fprintf(stderr, "%s ctx is null\n", __FUNCTION__);
+ _mesa_make_current(NULL, NULL, NULL);
+ return GL_TRUE;
+ }
+
+ if(driDrawPriv == NULL && driReadPriv == NULL) {
+ drfb = _mesa_create_framebuffer(&radeon->glCtx->Visual);
+ readfb = drfb;
+ }
+ else {
+ drfb = driDrawPriv->driverPrivate;
+ readfb = driReadPriv->driverPrivate;
+ }
+
+ if(driDrawPriv)
+ radeon_update_renderbuffers(driContextPriv, driDrawPriv, GL_FALSE);
+ if (driDrawPriv != driReadPriv)
+ radeon_update_renderbuffers(driContextPriv, driReadPriv, GL_FALSE);
+ _mesa_reference_renderbuffer(&radeon->state.color.rb,
+ &(radeon_get_renderbuffer(drfb, BUFFER_BACK_LEFT)->base.Base));
+ _mesa_reference_renderbuffer(&radeon->state.depth.rb,
+ &(radeon_get_renderbuffer(drfb, BUFFER_DEPTH)->base.Base));
+
+ if (RADEON_DEBUG & RADEON_DRI)
+ fprintf(stderr, "%s ctx %p dfb %p rfb %p\n", __FUNCTION__, radeon->glCtx, drfb, readfb);
+
+ if(driDrawPriv)
+ driUpdateFramebufferSize(radeon->glCtx, driDrawPriv);
+ if (driReadPriv != driDrawPriv)
+ driUpdateFramebufferSize(radeon->glCtx, driReadPriv);
+
+ _mesa_make_current(radeon->glCtx, drfb, readfb);
+ if (driDrawPriv == NULL && driReadPriv == NULL)
+ _mesa_reference_framebuffer(&drfb, NULL);
+
+ _mesa_update_state(radeon->glCtx);
+
+ if (radeon->glCtx->DrawBuffer == drfb) {
+ if(driDrawPriv != NULL) {
+ radeon_window_moved(radeon);
+ }
+
+ radeon_draw_buffer(radeon->glCtx, drfb);
+ }
+
+
+ if (RADEON_DEBUG & RADEON_DRI)
+ fprintf(stderr, "End %s\n", __FUNCTION__);
+
+ return GL_TRUE;
+}
+
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_common_context.h b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_common_context.h
index 4d663125500..5c2389208f1 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_common_context.h
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_common_context.h
@@ -1 +1,530 @@
-../radeon/radeon_common_context.h \ No newline at end of file
+
+#ifndef COMMON_CONTEXT_H
+#define COMMON_CONTEXT_H
+
+#include "main/mm.h"
+#include "math/m_vector.h"
+#include "tnl/t_context.h"
+#include "main/colormac.h"
+
+#include "radeon_debug.h"
+#include "radeon_screen.h"
+#include "radeon_drm.h"
+#include "dri_util.h"
+#include "tnl/t_vertex.h"
+#include "swrast/s_context.h"
+
+struct radeon_context;
+
+#include "radeon_bo_gem.h"
+#include "radeon_cs_gem.h"
+
+/* This union is used to avoid warnings/miscompilation
+ with float to uint32_t casts due to strict-aliasing */
+typedef union { GLfloat f; uint32_t ui32; } float_ui32_type;
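+/* Illustrative use: with float_ui32_type u; u.f = 1.0f; reading u.ui32
+ * yields 0x3F800000, the IEEE-754 bit pattern of 1.0f, without running
+ * into the strict-aliasing problems a pointer cast would have.
+ */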
+
+struct radeon_context;
+typedef struct radeon_context radeonContextRec;
+typedef struct radeon_context *radeonContextPtr;
+
+
+#define TEX_0 0x1
+#define TEX_1 0x2
+#define TEX_2 0x4
+#define TEX_3 0x8
+#define TEX_4 0x10
+#define TEX_5 0x20
+
+/* Rasterizing fallbacks */
+/* See corresponding strings in r200_swtcl.c */
+#define RADEON_FALLBACK_TEXTURE 0x0001
+#define RADEON_FALLBACK_DRAW_BUFFER 0x0002
+#define RADEON_FALLBACK_STENCIL 0x0004
+#define RADEON_FALLBACK_RENDER_MODE 0x0008
+#define RADEON_FALLBACK_BLEND_EQ 0x0010
+#define RADEON_FALLBACK_BLEND_FUNC 0x0020
+#define RADEON_FALLBACK_DISABLE 0x0040
+#define RADEON_FALLBACK_BORDER_MODE 0x0080
+#define RADEON_FALLBACK_DEPTH_BUFFER 0x0100
+#define RADEON_FALLBACK_STENCIL_BUFFER 0x0200
+
+#define R200_FALLBACK_TEXTURE 0x01
+#define R200_FALLBACK_DRAW_BUFFER 0x02
+#define R200_FALLBACK_STENCIL 0x04
+#define R200_FALLBACK_RENDER_MODE 0x08
+#define R200_FALLBACK_DISABLE 0x10
+#define R200_FALLBACK_BORDER_MODE 0x20
+
+#define RADEON_TCL_FALLBACK_RASTER 0x1 /* rasterization */
+#define RADEON_TCL_FALLBACK_UNFILLED 0x2 /* unfilled tris */
+#define RADEON_TCL_FALLBACK_LIGHT_TWOSIDE 0x4 /* twoside tris */
+#define RADEON_TCL_FALLBACK_MATERIAL 0x8 /* material in vb */
+#define RADEON_TCL_FALLBACK_TEXGEN_0 0x10 /* texgen, unit 0 */
+#define RADEON_TCL_FALLBACK_TEXGEN_1 0x20 /* texgen, unit 1 */
+#define RADEON_TCL_FALLBACK_TEXGEN_2 0x40 /* texgen, unit 2 */
+#define RADEON_TCL_FALLBACK_TCL_DISABLE 0x80 /* user disable */
+#define RADEON_TCL_FALLBACK_FOGCOORDSPEC 0x100 /* fogcoord, sep. spec light */
+
+/* The blit width for texture uploads
+ */
+#define BLIT_WIDTH_BYTES 1024
+
+/* Use the templated vertex format:
+ */
+#define COLOR_IS_RGBA
+#define TAG(x) radeon##x
+#include "tnl_dd/t_dd_vertex.h"
+#undef TAG
+
+#define RADEON_RB_CLASS 0xdeadbeef
+
+struct radeon_renderbuffer
+{
+ struct swrast_renderbuffer base;
+
+ struct radeon_bo *bo;
+ unsigned int cpp;
+ /* unsigned int offset; */
+ unsigned int pitch;
+
+ struct radeon_bo *map_bo;
+ GLbitfield map_mode;
+ int map_x, map_y, map_w, map_h;
+ int map_pitch;
+ void *map_buffer;
+
+ uint32_t draw_offset; /* FBO */
+ /* boo Xorg 6.8.2 compat */
+ int has_surface;
+
+ GLuint pf_pending; /**< sequence number of pending flip */
+ __DRIdrawable *dPriv;
+};
+
+struct radeon_framebuffer
+{
+ struct gl_framebuffer base;
+
+ struct radeon_renderbuffer *color_rb[2];
+};
+
+
+struct radeon_colorbuffer_state {
+ int roundEnable;
+ struct gl_renderbuffer *rb;
+ uint32_t draw_offset; /* offset into color renderbuffer - FBOs */
+};
+
+struct radeon_depthbuffer_state {
+ struct gl_renderbuffer *rb;
+};
+
+struct radeon_scissor_state {
+ drm_clip_rect_t rect;
+ GLboolean enabled;
+};
+
+struct radeon_state_atom {
+ struct radeon_state_atom *next, *prev;
+ const char *name; /* for debug */
+ int cmd_size; /* size in bytes */
+ GLuint idx;
+ GLuint is_tcl;
+ GLuint *cmd; /* one or more cmd's */
+ GLuint *lastcmd; /* one or more cmd's */
+ GLboolean dirty; /* dirty-mark in emit_state_list */
+ int (*check) (struct gl_context *, struct radeon_state_atom *atom); /* is this state active? */
+ void (*emit) (struct gl_context *, struct radeon_state_atom *atom);
+};
+
+struct radeon_hw_state {
+ /* Head of the linked list of state atoms. */
+ struct radeon_state_atom atomlist;
+ int max_state_size; /* Number of bytes necessary for a full state emit. */
+ int max_post_flush_size; /* Number of bytes necessary for post flushing emits */
+ GLboolean is_dirty, all_dirty;
+};
+
+
+/* Texture related */
+typedef struct _radeon_texture_image radeon_texture_image;
+
+
+/**
+ * This is a subclass of swrast_texture_image since we use swrast
+ * for software fallback rendering.
+ */
+struct _radeon_texture_image {
+ struct swrast_texture_image base;
+
+ /**
+ * If mt != 0, the image is stored in hardware format in the
+ * given mipmap tree. In this case, base.Data may point into the
+ * mapping of the buffer object that contains the mipmap tree.
+ *
+ * If mt == 0, the image is stored in normal memory pointed to
+ * by base.Data.
+ */
+ struct _radeon_mipmap_tree *mt;
+ struct radeon_bo *bo;
+ GLboolean used_as_render_target;
+};
+
+
+static INLINE radeon_texture_image *get_radeon_texture_image(struct gl_texture_image *image)
+{
+ return (radeon_texture_image*)image;
+}
+
+
+typedef struct radeon_tex_obj radeonTexObj, *radeonTexObjPtr;
+
+#define RADEON_TXO_MICRO_TILE (1 << 3)
+
+/* Texture object in locally shared texture space.
+ */
+struct radeon_tex_obj {
+ struct gl_texture_object base;
+ struct _radeon_mipmap_tree *mt;
+
+ /**
+ * This is true if we've verified that the mipmap tree above is complete
+ * and so on.
+ */
+ GLboolean validated;
+ /* Minimum LOD to be used during rendering */
+ unsigned minLod;
+ /* Maximum LOD to be used during rendering */
+ unsigned maxLod;
+
+ GLuint override_offset;
+ GLboolean image_override; /* Image overridden by GLX_EXT_tfp */
+ GLuint tile_bits; /* hw texture tile bits used on this texture */
+ struct radeon_bo *bo;
+
+ GLuint pp_txfilter; /* hardware register values */
+ GLuint pp_txformat;
+ GLuint pp_txformat_x;
+ GLuint pp_txsize; /* npot only */
+ GLuint pp_txpitch; /* npot only */
+ GLuint pp_border_color;
+ GLuint pp_cubic_faces; /* cube face 1,2,3,4 log2 sizes */
+
+ GLboolean border_fallback;
+};
+
+static INLINE radeonTexObj* radeon_tex_obj(struct gl_texture_object *texObj)
+{
+ return (radeonTexObj*)texObj;
+}
+
+/* occlusion query */
+struct radeon_query_object {
+ struct gl_query_object Base;
+ struct radeon_bo *bo;
+ int curr_offset;
+ GLboolean emitted_begin;
+
+ /* Double linked list of not flushed query objects */
+ struct radeon_query_object *prev, *next;
+};
+
+/* Need refcounting on dma buffers:
+ */
+struct radeon_dma_buffer {
+ int refcount; /* the number of retained regions in buf */
+ drmBufPtr buf;
+};
+
+struct radeon_aos {
+ struct radeon_bo *bo; /** Buffer object where vertex data is stored */
+ int offset; /** Offset into buffer object, in bytes */
+ int components; /** Number of components per vertex */
+ int stride; /** Stride in dwords (may be 0 for repeating) */
+ int count; /** Number of vertices */
+};
+
+#define DMA_BO_FREE_TIME 100
+
+struct radeon_dma_bo {
+ struct radeon_dma_bo *next, *prev;
+ struct radeon_bo *bo;
+ int expire_counter;
+};
+
+struct radeon_dma {
+ /* Active dma region. Allocations for vertices and retained
+ * regions come from here. Also used for emitting random vertices,
+ * these may be flushed by calling flush_current();
+ */
+ struct radeon_dma_bo free;
+ struct radeon_dma_bo wait;
+ struct radeon_dma_bo reserved;
+ size_t current_used; /** Number of bytes allocated and forgotten about */
+ size_t current_vertexptr; /** End of active vertex region */
+ size_t minimum_size;
+
+ /**
+ * If current_vertexptr != current_used then flush must be non-zero.
+ * flush must be called before non-active vertex allocations can be
+ * performed.
+ */
+ void (*flush) (struct gl_context *);
+};
+
+/* radeon_swtcl.c
+ */
+struct radeon_swtcl_info {
+
+ GLuint RenderIndex;
+ GLuint vertex_size;
+ GLubyte *verts;
+
+ /* Fallback rasterization functions
+ */
+ GLuint hw_primitive;
+ GLenum render_primitive;
+ GLuint numverts;
+
+ struct tnl_attr_map vertex_attrs[VERT_ATTRIB_MAX];
+ GLuint vertex_attr_count;
+
+ GLuint emit_prediction;
+ struct radeon_bo *bo;
+};
+
+#define RADEON_MAX_AOS_ARRAYS 16
+struct radeon_tcl_info {
+ struct radeon_aos aos[RADEON_MAX_AOS_ARRAYS];
+ GLuint aos_count;
+ struct radeon_bo *elt_dma_bo; /** Buffer object that contains element indices */
+ int elt_dma_offset; /** Offset into this buffer object, in bytes */
+};
+
+struct radeon_ioctl {
+ GLuint vertex_offset;
+ GLuint vertex_max;
+ struct radeon_bo *bo;
+ GLuint vertex_size;
+};
+
+#define RADEON_MAX_PRIMS 64
+
+struct radeon_prim {
+ GLuint start;
+ GLuint end;
+ GLuint prim;
+};
+
+static INLINE GLuint radeonPackColor(GLuint cpp,
+ GLubyte r, GLubyte g,
+ GLubyte b, GLubyte a)
+{
+ switch (cpp) {
+ case 2:
+ return PACK_COLOR_565(r, g, b);
+ case 4:
+ return PACK_COLOR_8888(a, r, g, b);
+ default:
+ return 0;
+ }
+}
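+/* Worked example (assuming the usual Mesa colormac.h packing macros):
+ * opaque red r=255, g=0, b=0, a=255 packs for cpp == 2 via
+ * PACK_COLOR_565 into 0xF800, and for cpp == 4 via PACK_COLOR_8888
+ * into 0xFFFF0000 (ARGB dword order).
+ */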
+
+#define MAX_CMD_BUF_SZ (16*1024)
+
+#define MAX_DMA_BUF_SZ (64*1024)
+
+struct radeon_store {
+ GLuint statenr;
+ GLuint primnr;
+ char cmd_buf[MAX_CMD_BUF_SZ];
+ int cmd_used;
+ int elts_start;
+};
+
+struct radeon_dri_mirror {
+ __DRIcontext *context; /* DRI context */
+ __DRIscreen *screen; /* DRI screen */
+
+ drm_context_t hwContext;
+ drm_hw_lock_t *hwLock;
+ int hwLockCount;
+ int fd;
+ int drmMinor;
+};
+
+typedef void (*radeon_tri_func) (radeonContextPtr,
+ radeonVertex *,
+ radeonVertex *, radeonVertex *);
+
+typedef void (*radeon_line_func) (radeonContextPtr,
+ radeonVertex *, radeonVertex *);
+
+typedef void (*radeon_point_func) (radeonContextPtr, radeonVertex *);
+
+#define RADEON_MAX_BOS 32
+struct radeon_state {
+ struct radeon_colorbuffer_state color;
+ struct radeon_depthbuffer_state depth;
+ struct radeon_scissor_state scissor;
+};
+
+/**
+ * This structure holds the command buffer while it is being constructed.
+ *
+ * The first batch of commands in the buffer is always the state that needs
+ * to be re-emitted when the context is lost. This batch can be skipped
+ * otherwise.
+ */
+struct radeon_cmdbuf {
+ struct radeon_cs_manager *csm;
+ struct radeon_cs *cs;
+ int size; /** # of dwords total */
+ unsigned int flushing:1; /** whether we're currently in FlushCmdBufLocked */
+};
+
+struct radeon_context {
+ struct gl_context *glCtx;
+ radeonScreenPtr radeonScreen; /* Screen private DRI data */
+
+ /* Texture object bookkeeping
+ */
+ int texture_depth;
+ float initialMaxAnisotropy;
+ uint32_t texture_row_align;
+ uint32_t texture_rect_row_align;
+ uint32_t texture_compressed_row_align;
+
+ struct radeon_dma dma;
+ struct radeon_hw_state hw;
+ /* Rasterization and vertex state:
+ */
+ GLuint TclFallback;
+ GLuint Fallback;
+ GLuint NewGLState;
+ GLbitfield64 tnl_index_bitset; /* index of bits for last tnl_install_attrs */
+
+ /* Drawable information */
+ unsigned int lastStamp;
+ drm_radeon_sarea_t *sarea; /* Private SAREA data */
+
+ /* Mirrors of some DRI state */
+ struct radeon_dri_mirror dri;
+
+ /* Busy waiting */
+ GLuint do_usleeps;
+ GLuint do_irqs;
+ GLuint irqsEmitted;
+ drm_radeon_irq_wait_t iw;
+
+ /* Derived state - for r300 only */
+ struct radeon_state state;
+
+ struct radeon_swtcl_info swtcl;
+ struct radeon_tcl_info tcl;
+ /* Configuration cache
+ */
+ driOptionCache optionCache;
+
+ struct radeon_cmdbuf cmdbuf;
+
+ struct radeon_debug debug;
+
+ drm_clip_rect_t fboRect;
+ GLboolean front_cliprects;
+
+ /**
+ * Set if rendering has occurred to the drawable's front buffer.
+ *
+ * This is used in the DRI2 case to detect that glFlush should also copy
+ * the contents of the fake front buffer to the real front buffer.
+ */
+ GLboolean front_buffer_dirty;
+
+ /**
+ * Track whether front-buffer rendering is currently enabled
+ *
+ * A separate flag is used to track this in order to support MRT more
+ * easily.
+ */
+ GLboolean is_front_buffer_rendering;
+
+ /**
+ * Track whether front-buffer is the current read target.
+ *
+ * This is closely associated with is_front_buffer_rendering, but may
+ * be set separately. The DRI2 fake front buffer must be referenced
+ * either way.
+ */
+ GLboolean is_front_buffer_reading;
+
+ struct {
+ struct radeon_query_object *current;
+ struct radeon_state_atom queryobj;
+ } query;
+
+ struct {
+ void (*get_lock)(radeonContextPtr radeon);
+ void (*update_viewport_offset)(struct gl_context *ctx);
+ void (*emit_cs_header)(struct radeon_cs *cs, radeonContextPtr rmesa);
+ void (*swtcl_flush)(struct gl_context *ctx, uint32_t offset);
+ void (*pre_emit_atoms)(radeonContextPtr rmesa);
+ void (*pre_emit_state)(radeonContextPtr rmesa);
+ void (*fallback)(struct gl_context *ctx, GLuint bit, GLboolean mode);
+ void (*free_context)(struct gl_context *ctx);
+ void (*emit_query_finish)(radeonContextPtr radeon);
+ void (*update_scissor)(struct gl_context *ctx);
+ unsigned (*check_blit)(gl_format mesa_format, uint32_t dst_pitch);
+ unsigned (*blit)(struct gl_context *ctx,
+ struct radeon_bo *src_bo,
+ intptr_t src_offset,
+ gl_format src_mesaformat,
+ unsigned src_pitch,
+ unsigned src_width,
+ unsigned src_height,
+ unsigned src_x_offset,
+ unsigned src_y_offset,
+ struct radeon_bo *dst_bo,
+ intptr_t dst_offset,
+ gl_format dst_mesaformat,
+ unsigned dst_pitch,
+ unsigned dst_width,
+ unsigned dst_height,
+ unsigned dst_x_offset,
+ unsigned dst_y_offset,
+ unsigned reg_width,
+ unsigned reg_height,
+ unsigned flip_y);
+ unsigned (*is_format_renderable)(gl_format mesa_format);
+ } vtbl;
+};
+
+#define RADEON_CONTEXT(glctx) ((radeonContextPtr)((glctx)->DriverCtx))
+
+static inline __DRIdrawable* radeon_get_drawable(radeonContextPtr radeon)
+{
+ return radeon->dri.context->driDrawablePriv;
+}
+
+static inline __DRIdrawable* radeon_get_readable(radeonContextPtr radeon)
+{
+ return radeon->dri.context->driReadablePriv;
+}
+
+GLboolean radeonInitContext(radeonContextPtr radeon,
+ struct dd_function_table* functions,
+ const struct gl_config * glVisual,
+ __DRIcontext * driContextPriv,
+ void *sharedContextPrivate);
+
+void radeonCleanupContext(radeonContextPtr radeon);
+GLboolean radeonUnbindContext(__DRIcontext * driContextPriv);
+void radeon_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable,
+ GLboolean front_only);
+GLboolean radeonMakeCurrent(__DRIcontext * driContextPriv,
+ __DRIdrawable * driDrawPriv,
+ __DRIdrawable * driReadPriv);
+extern void radeonDestroyContext(__DRIcontext * driContextPriv);
+void radeon_prepare_render(radeonContextPtr radeon);
+
+#endif
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_debug.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_debug.c
index c98c2e074c5..dd0afb809a0 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_debug.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_debug.c
@@ -1 +1,108 @@
-../radeon/radeon_debug.c \ No newline at end of file
+/*
+ * Copyright © 2009 Pauli Nieminen
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Pauli Nieminen <suokkos@gmail.com>
+ */
+
+#include "utils.h"
+
+#include "radeon_debug.h"
+#include "radeon_common_context.h"
+
+#include <stdarg.h>
+#include <stdio.h>
+
+static const struct dri_debug_control debug_control[] = {
+ {"fall", RADEON_FALLBACKS},
+ {"tex", RADEON_TEXTURE},
+ {"ioctl", RADEON_IOCTL},
+ {"verts", RADEON_VERTS},
+ {"render", RADEON_RENDER},
+ {"swrender", RADEON_SWRENDER},
+ {"state", RADEON_STATE},
+ {"shader", RADEON_SHADER},
+ {"vfmt", RADEON_VFMT},
+ {"vtxf", RADEON_VFMT},
+ {"dri", RADEON_DRI},
+ {"dma", RADEON_DMA},
+ {"sanity", RADEON_SANITY},
+ {"sync", RADEON_SYNC},
+ {"pixel", RADEON_PIXEL},
+ {"mem", RADEON_MEMORY},
+ {"cs", RADEON_CS},
+ {"allmsg", ~RADEON_SYNC}, /* avoid the term "sync" because the parser uses strstr */
+ {NULL, 0}
+};
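+/* Usage sketch: these names are matched against the RADEON_DEBUG
+ * environment variable by driParseDebugString() in radeon_init_debug()
+ * below; assuming the usual comma- or space-separated syntax, e.g.
+ *
+ *   RADEON_DEBUG=state,ioctl glxgears
+ *
+ * enables the RADEON_STATE and RADEON_IOCTL channels.
+ */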
+
+radeon_debug_type_t radeon_enabled_debug_types;
+
+void radeon_init_debug(void)
+{
+ radeon_enabled_debug_types = driParseDebugString(getenv("RADEON_DEBUG"), debug_control);
+
+ radeon_enabled_debug_types |= RADEON_GENERAL;
+}
+
+void _radeon_debug_add_indent(void)
+{
+ GET_CURRENT_CONTEXT(ctx);
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+ const size_t length = sizeof(radeon->debug.indent)
+ / sizeof(radeon->debug.indent[0]);
+ if (radeon->debug.indent_depth < length - 1) {
+ radeon->debug.indent[radeon->debug.indent_depth] = '\t';
+ ++radeon->debug.indent_depth;
+ }
+}
+
+void _radeon_debug_remove_indent(void)
+{
+ GET_CURRENT_CONTEXT(ctx);
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+ if (radeon->debug.indent_depth > 0) {
+ radeon->debug.indent[radeon->debug.indent_depth] = '\0';
+ --radeon->debug.indent_depth;
+ }
+}
+
+void _radeon_print(const radeon_debug_type_t type,
+ const radeon_debug_level_t level,
+ const char* message,
+ ...)
+{
+ va_list values;
+
+ GET_CURRENT_CONTEXT(ctx);
+ if (ctx) {
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+ // FIXME: Make this multi thread safe
+ if (radeon->debug.indent_depth)
+ fprintf(stderr, "%s", radeon->debug.indent);
+ }
+ va_start( values, message );
+ vfprintf(stderr, message, values);
+ va_end( values );
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_debug.h b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_debug.h
index bd8aa28e89e..449c27a3fe1 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_debug.h
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_debug.h
@@ -1 +1,174 @@
-../radeon/radeon_debug.h \ No newline at end of file
+/*
+ * Copyright © 2009 Pauli Nieminen
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Pauli Nieminen <suokkos@gmail.com>
+ */
+
+#ifndef RADEON_DEBUG_H_INCLUDED
+#define RADEON_DEBUG_H_INCLUDED
+
+#include <stdlib.h>
+
+typedef enum radeon_debug_levels {
+ RADEON_CRITICAL = 0, /* Only errors */
+ RADEON_IMPORTANT = 1, /* Important warnings and messages */
+ RADEON_NORMAL = 2, /* Normal log messages useful for debugging */
+ RADEON_VERBOSE = 3, /* Extra details for debugging */
+ RADEON_TRACE = 4 /* Log about everything that happens */
+} radeon_debug_level_t;
+
+/**
+ * Compile-time option to change the level of debugging compiled into the
+ * DRI driver. Selecting the critical level is not recommended: the
+ * performance gains are minimal, while many important warnings are lost
+ * when errors do occur.
+ */
+#ifndef RADEON_DEBUG_LEVEL
+# ifdef DEBUG
+# define RADEON_DEBUG_LEVEL RADEON_TRACE
+# else
+# define RADEON_DEBUG_LEVEL RADEON_VERBOSE
+# endif
+#endif
+
+typedef enum radeon_debug_types {
+ RADEON_TEXTURE = 0x00001,
+ RADEON_STATE = 0x00002,
+ RADEON_IOCTL = 0x00004,
+ RADEON_RENDER = 0x00008,
+ RADEON_SWRENDER = 0x00010,
+ RADEON_FALLBACKS = 0x00020,
+ RADEON_VFMT = 0x00040,
+ RADEON_SHADER = 0x00080,
+ RADEON_CS = 0x00100,
+ RADEON_DRI = 0x00200,
+ RADEON_DMA = 0x00400,
+ RADEON_SANITY = 0x00800,
+ RADEON_SYNC = 0x01000,
+ RADEON_PIXEL = 0x02000,
+ RADEON_MEMORY = 0x04000,
+ RADEON_VERTS = 0x08000,
+ RADEON_GENERAL = 0x10000 /* Used for errors and warnings */
+} radeon_debug_type_t;
+
+#define RADEON_MAX_INDENT 5
+
+struct radeon_debug {
+ size_t indent_depth;
+ char indent[RADEON_MAX_INDENT];
+};
+
+extern radeon_debug_type_t radeon_enabled_debug_types;
+
+/**
+ * Compatibility layer for old debug code
+ **/
+#define RADEON_DEBUG radeon_enabled_debug_types
+
+static inline int radeon_is_debug_enabled(const radeon_debug_type_t type,
+ const radeon_debug_level_t level)
+{
+ return RADEON_DEBUG_LEVEL >= level
+ && (type & radeon_enabled_debug_types);
+}
+/*
+ * Define __attribute__ away as an empty macro when building with a
+ * compiler that lacks this GCC extension.
+ */
+#ifndef __GNUC__
+#define __attribute__(x) /*empty*/
+#endif
+
+
+extern void _radeon_print(const radeon_debug_type_t type,
+ const radeon_debug_level_t level,
+ const char* message,
+ ...) __attribute__((format(printf,3,4)));
+/**
+ * Print out a debug message if the channel specified by type is enabled
+ * and the compile-time debugging level is at least as high as the level
+ * parameter.
+ */
+#define radeon_print(type, level, ...) do { \
+ const radeon_debug_level_t _debug_level = (level); \
+ const radeon_debug_type_t _debug_type = (type); \
+ /* Compile out if level of message is too high */ \
+ if (radeon_is_debug_enabled(type, level)) { \
+ _radeon_print(_debug_type, _debug_level, \
+ __VA_ARGS__); \
+ } \
+} while(0)
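+/* Illustrative call: radeon_print(RADEON_CS, RADEON_TRACE,
+ * "emitting %d dwords\n", n); since radeon_is_debug_enabled() compares
+ * against the RADEON_DEBUG_LEVEL compile-time constant, such a call is
+ * optimized away entirely when RADEON_DEBUG_LEVEL < RADEON_TRACE.
+ */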
+
+/**
+ * printf style function for writing error messages.
+ */
+#define radeon_error(...) do { \
+ radeon_print(RADEON_GENERAL, RADEON_CRITICAL, \
+ __VA_ARGS__); \
+} while(0)
+
+/**
+ * printf style function for writing warnings.
+ */
+#define radeon_warning(...) do { \
+ radeon_print(RADEON_GENERAL, RADEON_IMPORTANT, \
+ __VA_ARGS__); \
+} while(0)
+
+extern void radeon_init_debug(void);
+extern void _radeon_debug_add_indent(void);
+extern void _radeon_debug_remove_indent(void);
+
+static inline void radeon_debug_add_indent(void)
+{
+ if (RADEON_DEBUG_LEVEL >= RADEON_VERBOSE) {
+ _radeon_debug_add_indent();
+ }
+}
+static inline void radeon_debug_remove_indent(void)
+{
+ if (RADEON_DEBUG_LEVEL >= RADEON_VERBOSE) {
+ _radeon_debug_remove_indent();
+ }
+}
+
+
+/* From http://gcc.gnu.org/onlinedocs/gcc-3.2.3/gcc/Variadic-Macros.html .
+ I suppose we could inline this and use macro to fetch out __LINE__ and stuff in case we run into trouble
+ with other compilers ... GLUE!
+*/
+#define WARN_ONCE(...) do { \
+ static int __warn_once=1; \
+ if(__warn_once){ \
+ radeon_warning("*********************************WARN_ONCE*********************************\n"); \
+ radeon_warning("File %s function %s line %d\n", \
+ __FILE__, __FUNCTION__, __LINE__); \
+ radeon_warning(__VA_ARGS__);\
+ radeon_warning("***************************************************************************\n"); \
+ __warn_once=0;\
+ } \
+ } while(0)
+
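+/* Illustrative use: WARN_ONCE("unsupported format %d\n", fmt); prints the
+ * banner and message only on the first execution of that particular call
+ * site, since each macro expansion gets its own static __warn_once flag.
+ */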
+
+#endif
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_dma.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_dma.c
index 43be0006255..61cddda5dc1 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_dma.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_dma.c
@@ -1 +1,511 @@
-../radeon/radeon_dma.c \ No newline at end of file
+/**************************************************************************
+
+Copyright (C) 2004 Nicolai Haehnle.
+Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
+
+The Weather Channel (TM) funded Tungsten Graphics to develop the
+initial release of the Radeon 8500 driver under the XFree86 license.
+This notice must be preserved.
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+on the rights to use, copy, modify, merge, publish, distribute, sub
+license, and/or sell copies of the Software, and to permit persons to whom
+the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the next
+paragraph) shall be included in all copies or substantial portions of the
+Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ATI, VA LINUX SYSTEMS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#include <errno.h>
+#include "radeon_common.h"
+#include "radeon_fog.h"
+#include "main/simple_list.h"
+
+#if defined(USE_X86_ASM)
+#define COPY_DWORDS( dst, src, nr ) \
+do { \
+ int __tmp; \
+ __asm__ __volatile__( "rep ; movsl" \
+ : "=%c" (__tmp), "=D" (dst), "=S" (__tmp) \
+ : "0" (nr), \
+ "D" ((long)dst), \
+ "S" ((long)src) ); \
+} while (0)
+#else
+#define COPY_DWORDS( dst, src, nr ) \
+do { \
+ int j; \
+ for ( j = 0 ; j < nr ; j++ ) \
+ dst[j] = ((int *)src)[j]; \
+ dst += nr; \
+} while (0)
+#endif
+
+void radeonEmitVec4(uint32_t *out, const GLvoid * data, int stride, int count)
+{
+ int i;
+
+ if (RADEON_DEBUG & RADEON_VERTS)
+ fprintf(stderr, "%s count %d stride %d out %p data %p\n",
+ __FUNCTION__, count, stride, (void *)out, (void *)data);
+
+ if (stride == 4)
+ COPY_DWORDS(out, data, count);
+ else
+ for (i = 0; i < count; i++) {
+ out[0] = *(int *)data;
+ out++;
+ data += stride;
+ }
+}
+
+void radeonEmitVec8(uint32_t *out, const GLvoid * data, int stride, int count)
+{
+ int i;
+
+ if (RADEON_DEBUG & RADEON_VERTS)
+ fprintf(stderr, "%s count %d stride %d out %p data %p\n",
+ __FUNCTION__, count, stride, (void *)out, (void *)data);
+
+ if (stride == 8)
+ COPY_DWORDS(out, data, count * 2);
+ else
+ for (i = 0; i < count; i++) {
+ out[0] = *(int *)data;
+ out[1] = *(int *)(data + 4);
+ out += 2;
+ data += stride;
+ }
+}
+
+void radeonEmitVec12(uint32_t *out, const GLvoid * data, int stride, int count)
+{
+ int i;
+
+ if (RADEON_DEBUG & RADEON_VERTS)
+ fprintf(stderr, "%s count %d stride %d out %p data %p\n",
+ __FUNCTION__, count, stride, (void *)out, (void *)data);
+
+ if (stride == 12) {
+ COPY_DWORDS(out, data, count * 3);
+ }
+ else
+ for (i = 0; i < count; i++) {
+ out[0] = *(int *)data;
+ out[1] = *(int *)(data + 4);
+ out[2] = *(int *)(data + 8);
+ out += 3;
+ data += stride;
+ }
+}
+
+void radeonEmitVec16(uint32_t *out, const GLvoid * data, int stride, int count)
+{
+ int i;
+
+ if (RADEON_DEBUG & RADEON_VERTS)
+ fprintf(stderr, "%s count %d stride %d out %p data %p\n",
+ __FUNCTION__, count, stride, (void *)out, (void *)data);
+
+ if (stride == 16)
+ COPY_DWORDS(out, data, count * 4);
+ else
+ for (i = 0; i < count; i++) {
+ out[0] = *(int *)data;
+ out[1] = *(int *)(data + 4);
+ out[2] = *(int *)(data + 8);
+ out[3] = *(int *)(data + 12);
+ out += 4;
+ data += stride;
+ }
+}
+
+void rcommon_emit_vector(struct gl_context * ctx, struct radeon_aos *aos,
+ const GLvoid * data, int size, int stride, int count)
+{
+ radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
+ uint32_t *out;
+
+ if (stride == 0) {
+ radeonAllocDmaRegion(rmesa, &aos->bo, &aos->offset, size * 4, 32);
+ count = 1;
+ aos->stride = 0;
+ } else {
+ radeonAllocDmaRegion(rmesa, &aos->bo, &aos->offset, size * count * 4, 32);
+ aos->stride = size;
+ }
+
+ aos->components = size;
+ aos->count = count;
+
+ radeon_bo_map(aos->bo, 1);
+ out = (uint32_t*)((char*)aos->bo->ptr + aos->offset);
+ switch (size) {
+ case 1: radeonEmitVec4(out, data, stride, count); break;
+ case 2: radeonEmitVec8(out, data, stride, count); break;
+ case 3: radeonEmitVec12(out, data, stride, count); break;
+ case 4: radeonEmitVec16(out, data, stride, count); break;
+ default:
+ assert(0);
+ break;
+ }
+ radeon_bo_unmap(aos->bo);
+}
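+/* Example (hypothetical call): for a tightly packed float3 position array,
+ * rcommon_emit_vector(ctx, &aos, verts, 3, 12, n) allocates 3 * n * 4
+ * bytes of DMA space and copies through radeonEmitVec12; aos->stride is
+ * recorded in dwords (3 here). Passing stride == 0 instead emits a single
+ * repeating vertex with aos->stride = 0.
+ */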
+
+void rcommon_emit_vecfog(struct gl_context *ctx, struct radeon_aos *aos,
+ GLvoid *data, int stride, int count)
+{
+ int i;
+ float *out;
+ int size = 1;
+ radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
+
+ if (RADEON_DEBUG & RADEON_VERTS)
+ fprintf(stderr, "%s count %d stride %d\n",
+ __FUNCTION__, count, stride);
+
+ if (stride == 0) {
+ radeonAllocDmaRegion( rmesa, &aos->bo, &aos->offset, size * 4, 32 );
+ count = 1;
+ aos->stride = 0;
+ } else {
+ radeonAllocDmaRegion(rmesa, &aos->bo, &aos->offset, size * count * 4, 32);
+ aos->stride = size;
+ }
+
+ aos->components = size;
+ aos->count = count;
+
+ /* Emit the data */
+ radeon_bo_map(aos->bo, 1);
+ out = (float*)((char*)aos->bo->ptr + aos->offset);
+ for (i = 0; i < count; i++) {
+ out[0] = radeonComputeFogBlendFactor( ctx, *(GLfloat *)data );
+ out++;
+ data += stride;
+ }
+ radeon_bo_unmap(aos->bo);
+}
+
+void radeon_init_dma(radeonContextPtr rmesa)
+{
+ make_empty_list(&rmesa->dma.free);
+ make_empty_list(&rmesa->dma.wait);
+ make_empty_list(&rmesa->dma.reserved);
+ rmesa->dma.minimum_size = MAX_DMA_BUF_SZ;
+}
+
+void radeonRefillCurrentDmaRegion(radeonContextPtr rmesa, int size)
+{
+ struct radeon_dma_bo *dma_bo = NULL;
+ /* Raise the minimum size to at least the requested size,
+    aligned up to the next 16 bytes. */
+ if (size > rmesa->dma.minimum_size)
+ rmesa->dma.minimum_size = (size + 15) & (~15);
+
+ radeon_print(RADEON_DMA, RADEON_NORMAL, "%s size %d minimum_size %zu\n",
+ __FUNCTION__, size, rmesa->dma.minimum_size);
+
+ if (is_empty_list(&rmesa->dma.free)
+ || last_elem(&rmesa->dma.free)->bo->size < size) {
+ dma_bo = CALLOC_STRUCT(radeon_dma_bo);
+ assert(dma_bo);
+
+again_alloc:
+ dma_bo->bo = radeon_bo_open(rmesa->radeonScreen->bom,
+ 0, rmesa->dma.minimum_size, 4,
+ RADEON_GEM_DOMAIN_GTT, 0);
+
+ if (!dma_bo->bo) {
+ rcommonFlushCmdBuf(rmesa, __FUNCTION__);
+ goto again_alloc;
+ }
+ insert_at_head(&rmesa->dma.reserved, dma_bo);
+ } else {
+ /* We push and pop buffers at the end of the list, so we can
+    keep a counter of unused buffers and later free them from
+    the beginning of the list. */
+ dma_bo = last_elem(&rmesa->dma.free);
+ remove_from_list(dma_bo);
+ insert_at_head(&rmesa->dma.reserved, dma_bo);
+ }
+
+ rmesa->dma.current_used = 0;
+ rmesa->dma.current_vertexptr = 0;
+
+ if (radeon_cs_space_check_with_bo(rmesa->cmdbuf.cs,
+ first_elem(&rmesa->dma.reserved)->bo,
+ RADEON_GEM_DOMAIN_GTT, 0))
+ fprintf(stderr,"failure to revalidate BOs - badness\n");
+
+ if (is_empty_list(&rmesa->dma.reserved)) {
+ /* The command buffer has been flushed in radeon_revalidate_bos */
+ goto again_alloc;
+ }
+ radeon_bo_map(first_elem(&rmesa->dma.reserved)->bo, 1);
+}
+
+/* Allocates a region from rmesa->dma.current. If there isn't enough
+ * space in current, grab a new buffer (and discard what was left of current)
+ */
+void radeonAllocDmaRegion(radeonContextPtr rmesa,
+ struct radeon_bo **pbo, int *poffset,
+ int bytes, int alignment)
+{
+ if (RADEON_DEBUG & RADEON_IOCTL)
+ fprintf(stderr, "%s %d\n", __FUNCTION__, bytes);
+
+ if (rmesa->dma.flush)
+ rmesa->dma.flush(rmesa->glCtx);
+
+ assert(rmesa->dma.current_used == rmesa->dma.current_vertexptr);
+
+ alignment--;
+ rmesa->dma.current_used = (rmesa->dma.current_used + alignment) & ~alignment;
+
+ if (is_empty_list(&rmesa->dma.reserved)
+ || rmesa->dma.current_used + bytes > first_elem(&rmesa->dma.reserved)->bo->size)
+ radeonRefillCurrentDmaRegion(rmesa, bytes);
+
+ *poffset = rmesa->dma.current_used;
+ *pbo = first_elem(&rmesa->dma.reserved)->bo;
+ radeon_bo_ref(*pbo);
+
+ /* Always align to at least 16 bytes */
+ rmesa->dma.current_used = (rmesa->dma.current_used + bytes + 15) & ~15;
+ rmesa->dma.current_vertexptr = rmesa->dma.current_used;
+
+ assert(rmesa->dma.current_used <= first_elem(&rmesa->dma.reserved)->bo->size);
+}
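+
+/* Worked example of the alignment math above (illustrative numbers):
+ * with current_used = 100 and alignment = 32, the mask step gives
+ * (100 + 31) & ~31 = 128, so the returned offset is 32-byte aligned;
+ * after a 20-byte allocation, current_used advances to
+ * (128 + 20 + 15) & ~15 = 160, preserving the 16-byte minimum alignment.
+ */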
+
+void radeonFreeDmaRegions(radeonContextPtr rmesa)
+{
+ struct radeon_dma_bo *dma_bo;
+ struct radeon_dma_bo *temp;
+ if (RADEON_DEBUG & RADEON_DMA)
+ fprintf(stderr, "%s\n", __FUNCTION__);
+
+ foreach_s(dma_bo, temp, &rmesa->dma.free) {
+ remove_from_list(dma_bo);
+ radeon_bo_unref(dma_bo->bo);
+ FREE(dma_bo);
+ }
+
+ foreach_s(dma_bo, temp, &rmesa->dma.wait) {
+ remove_from_list(dma_bo);
+ radeon_bo_unref(dma_bo->bo);
+ FREE(dma_bo);
+ }
+
+ foreach_s(dma_bo, temp, &rmesa->dma.reserved) {
+ remove_from_list(dma_bo);
+ radeon_bo_unref(dma_bo->bo);
+ FREE(dma_bo);
+ }
+}
+
+void radeonReturnDmaRegion(radeonContextPtr rmesa, int return_bytes)
+{
+ if (is_empty_list(&rmesa->dma.reserved))
+ return;
+
+ if (RADEON_DEBUG & RADEON_IOCTL)
+ fprintf(stderr, "%s %d\n", __FUNCTION__, return_bytes);
+ rmesa->dma.current_used -= return_bytes;
+ rmesa->dma.current_vertexptr = rmesa->dma.current_used;
+}
+
+static int radeon_bo_is_idle(struct radeon_bo* bo)
+{
+ uint32_t domain;
+ int ret = radeon_bo_is_busy(bo, &domain);
+ if (ret == -EINVAL) {
+		WARN_ONCE("Your libdrm or kernel doesn't support the busy query.\n"
+			"This may cause a small performance drop.\n");
+ }
+ return ret != -EBUSY;
+}
+
+void radeonReleaseDmaRegions(radeonContextPtr rmesa)
+{
+ struct radeon_dma_bo *dma_bo;
+ struct radeon_dma_bo *temp;
+ const int expire_at = ++rmesa->dma.free.expire_counter + DMA_BO_FREE_TIME;
+ const int time = rmesa->dma.free.expire_counter;
+
+ if (RADEON_DEBUG & RADEON_DMA) {
+ size_t free = 0,
+ wait = 0,
+ reserved = 0;
+ foreach(dma_bo, &rmesa->dma.free)
+ ++free;
+
+ foreach(dma_bo, &rmesa->dma.wait)
+ ++wait;
+
+ foreach(dma_bo, &rmesa->dma.reserved)
+ ++reserved;
+
+ fprintf(stderr, "%s: free %zu, wait %zu, reserved %zu, minimum_size: %zu\n",
+ __FUNCTION__, free, wait, reserved, rmesa->dma.minimum_size);
+ }
+
+	/* Move waiting BOs to the free list.
+	   The wait list gives the GPU time to finish with the data before reuse. */
+ foreach_s(dma_bo, temp, &rmesa->dma.wait) {
+ if (dma_bo->expire_counter == time) {
+ WARN_ONCE("Leaking dma buffer object!\n");
+ radeon_bo_unref(dma_bo->bo);
+ remove_from_list(dma_bo);
+ FREE(dma_bo);
+ continue;
+ }
+		/* free objects that are too small to satisfy the current, larger minimum size */
+ if (dma_bo->bo->size < rmesa->dma.minimum_size) {
+ radeon_bo_unref(dma_bo->bo);
+ remove_from_list(dma_bo);
+ FREE(dma_bo);
+ continue;
+ }
+ if (!radeon_bo_is_idle(dma_bo->bo)) {
+ break;
+ }
+ remove_from_list(dma_bo);
+ dma_bo->expire_counter = expire_at;
+ insert_at_tail(&rmesa->dma.free, dma_bo);
+ }
+
+ /* move reserved to wait list */
+ foreach_s(dma_bo, temp, &rmesa->dma.reserved) {
+ radeon_bo_unmap(dma_bo->bo);
+		/* free objects that are too small to satisfy the current, larger minimum size */
+ if (dma_bo->bo->size < rmesa->dma.minimum_size) {
+ radeon_bo_unref(dma_bo->bo);
+ remove_from_list(dma_bo);
+ FREE(dma_bo);
+ continue;
+ }
+ remove_from_list(dma_bo);
+ dma_bo->expire_counter = expire_at;
+ insert_at_tail(&rmesa->dma.wait, dma_bo);
+ }
+
+ /* free bos that have been unused for some time */
+ foreach_s(dma_bo, temp, &rmesa->dma.free) {
+ if (dma_bo->expire_counter != time)
+ break;
+ remove_from_list(dma_bo);
+ radeon_bo_unref(dma_bo->bo);
+ FREE(dma_bo);
+ }
+
+}
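+
+/* Buffer lifecycle implemented above (summary of existing behavior):
+ *
+ *   reserved --release--> wait --GPU idle--> free --expired--> freed
+ *
+ * Buffers smaller than the current minimum_size are dropped at each
+ * transition, and a wait-list entry that never goes idle within
+ * DMA_BO_FREE_TIME release cycles is reported as a leak.
+ */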
+
+
+/* Flush vertices in the current dma region.
+ */
+void rcommon_flush_last_swtcl_prim( struct gl_context *ctx )
+{
+ radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
+ struct radeon_dma *dma = &rmesa->dma;
+
+ if (RADEON_DEBUG & RADEON_IOCTL)
+ fprintf(stderr, "%s\n", __FUNCTION__);
+ dma->flush = NULL;
+
+ radeon_bo_unmap(rmesa->swtcl.bo);
+
+ if (!is_empty_list(&dma->reserved)) {
+ GLuint current_offset = dma->current_used;
+
+ assert (dma->current_used +
+ rmesa->swtcl.numverts * rmesa->swtcl.vertex_size * 4 ==
+ dma->current_vertexptr);
+
+ if (dma->current_used != dma->current_vertexptr) {
+ dma->current_used = dma->current_vertexptr;
+
+ rmesa->vtbl.swtcl_flush(ctx, current_offset);
+ }
+ rmesa->swtcl.numverts = 0;
+ }
+ radeon_bo_unref(rmesa->swtcl.bo);
+ rmesa->swtcl.bo = NULL;
+}
+/* Alloc space in the current dma region.
+ */
+void *
+rcommonAllocDmaLowVerts( radeonContextPtr rmesa, int nverts, int vsize )
+{
+ GLuint bytes = vsize * nverts;
+ void *head;
+ if (RADEON_DEBUG & RADEON_IOCTL)
+ fprintf(stderr, "%s\n", __FUNCTION__);
+
+	if (is_empty_list(&rmesa->dma.reserved) ||
+	    rmesa->dma.current_vertexptr + bytes > first_elem(&rmesa->dma.reserved)->bo->size) {
+ if (rmesa->dma.flush) {
+ rmesa->dma.flush(rmesa->glCtx);
+ }
+
+ radeonRefillCurrentDmaRegion(rmesa, bytes);
+
+ return NULL;
+ }
+
+ if (!rmesa->dma.flush) {
+		/* if the command buffer was flushed, the DMA flush callback must be re-armed */
+ rmesa->glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
+ rmesa->dma.flush = rcommon_flush_last_swtcl_prim;
+ }
+
+ ASSERT( vsize == rmesa->swtcl.vertex_size * 4 );
+ ASSERT( rmesa->dma.flush == rcommon_flush_last_swtcl_prim );
+ ASSERT( rmesa->dma.current_used +
+ rmesa->swtcl.numverts * rmesa->swtcl.vertex_size * 4 ==
+ rmesa->dma.current_vertexptr );
+
+ if (!rmesa->swtcl.bo) {
+ rmesa->swtcl.bo = first_elem(&rmesa->dma.reserved)->bo;
+ radeon_bo_ref(rmesa->swtcl.bo);
+ radeon_bo_map(rmesa->swtcl.bo, 1);
+ }
+
+ head = (rmesa->swtcl.bo->ptr + rmesa->dma.current_vertexptr);
+ rmesa->dma.current_vertexptr += bytes;
+ rmesa->swtcl.numverts += nverts;
+ return head;
+}
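+
+/* Note on the NULL return above: when the reserved buffer cannot hold the
+ * request, the function flushes, refills the DMA region and returns NULL,
+ * so a caller has to ask again once the refill is done, e.g. (sketch):
+ *
+ *   do {
+ *       v = rcommonAllocDmaLowVerts(rmesa, nverts, vsize);
+ *   } while (!v);
+ */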
+
+void radeonReleaseArrays( struct gl_context *ctx, GLuint newinputs )
+{
+ radeonContextPtr radeon = RADEON_CONTEXT( ctx );
+ int i;
+ if (RADEON_DEBUG & RADEON_IOCTL)
+ fprintf(stderr, "%s\n", __FUNCTION__);
+
+ if (radeon->dma.flush) {
+ radeon->dma.flush(radeon->glCtx);
+ }
+ for (i = 0; i < radeon->tcl.aos_count; i++) {
+ if (radeon->tcl.aos[i].bo) {
+ radeon_bo_unref(radeon->tcl.aos[i].bo);
+ radeon->tcl.aos[i].bo = NULL;
+
+ }
+ }
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_dma.h b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_dma.h
index 82e50634e3c..db7b84ebd1e 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_dma.h
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_dma.h
@@ -1 +1,60 @@
-../radeon/radeon_dma.h \ No newline at end of file
+/**************************************************************************
+
+Copyright (C) 2004 Nicolai Haehnle.
+Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
+
+The Weather Channel (TM) funded Tungsten Graphics to develop the
+initial release of the Radeon 8500 driver under the XFree86 license.
+This notice must be preserved.
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+on the rights to use, copy, modify, merge, publish, distribute, sub
+license, and/or sell copies of the Software, and to permit persons to whom
+the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the next
+paragraph) shall be included in all copies or substantial portions of the
+Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ATI, VA LINUX SYSTEMS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#ifndef RADEON_DMA_H
+#define RADEON_DMA_H
+
+void radeonEmitVec4(uint32_t *out, const GLvoid * data, int stride, int count);
+void radeonEmitVec8(uint32_t *out, const GLvoid * data, int stride, int count);
+void radeonEmitVec12(uint32_t *out, const GLvoid * data, int stride, int count);
+void radeonEmitVec16(uint32_t *out, const GLvoid * data, int stride, int count);
+
+void rcommon_emit_vector(struct gl_context * ctx, struct radeon_aos *aos,
+ const GLvoid * data, int size, int stride, int count);
+void rcommon_emit_vecfog(struct gl_context *ctx, struct radeon_aos *aos,
+ GLvoid *data, int stride, int count);
+
+void radeonReturnDmaRegion(radeonContextPtr rmesa, int return_bytes);
+void radeonRefillCurrentDmaRegion(radeonContextPtr rmesa, int size);
+void radeon_init_dma(radeonContextPtr rmesa);
+void radeonAllocDmaRegion(radeonContextPtr rmesa,
+ struct radeon_bo **pbo, int *poffset,
+ int bytes, int alignment);
+void radeonReleaseDmaRegions(radeonContextPtr rmesa);
+
+void rcommon_flush_last_swtcl_prim(struct gl_context *ctx);
+
+void *rcommonAllocDmaLowVerts(radeonContextPtr rmesa, int nverts, int vsize);
+void radeonFreeDmaRegions(radeonContextPtr rmesa);
+void radeonReleaseArrays( struct gl_context *ctx, GLuint newinputs );
+#endif
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_fbo.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_fbo.c
index 0d738d8d780..f597f35c0f7 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_fbo.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_fbo.c
@@ -1 +1,980 @@
-../radeon/radeon_fbo.c \ No newline at end of file
+/**************************************************************************
+ *
+ * Copyright 2008 Red Hat Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "main/imports.h"
+#include "main/macros.h"
+#include "main/mfeatures.h"
+#include "main/mtypes.h"
+#include "main/enums.h"
+#include "main/fbobject.h"
+#include "main/framebuffer.h"
+#include "main/renderbuffer.h"
+#include "main/context.h"
+#include "swrast/swrast.h"
+#include "drivers/common/meta.h"
+
+#include "radeon_common.h"
+#include "radeon_mipmap_tree.h"
+
+#define FILE_DEBUG_FLAG RADEON_TEXTURE
+#define DBG(...) do { \
+ if (RADEON_DEBUG & FILE_DEBUG_FLAG) \
+ printf(__VA_ARGS__); \
+} while(0)
+
+static struct gl_framebuffer *
+radeon_new_framebuffer(struct gl_context *ctx, GLuint name)
+{
+ return _mesa_new_framebuffer(ctx, name);
+}
+
+static void
+radeon_delete_renderbuffer(struct gl_context *ctx, struct gl_renderbuffer *rb)
+{
+ struct radeon_renderbuffer *rrb = radeon_renderbuffer(rb);
+
+ radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+ "%s(rb %p, rrb %p) \n",
+ __func__, rb, rrb);
+
+ ASSERT(rrb);
+
+ if (rrb && rrb->bo) {
+ radeon_bo_unref(rrb->bo);
+ }
+ _mesa_delete_renderbuffer(ctx, rb);
+}
+
+#if defined(RADEON_R100)
+static GLuint get_depth_z32(const struct radeon_renderbuffer * rrb,
+ GLint x, GLint y)
+{
+ GLuint ba, address = 0;
+
+ ba = (y >> 4) * (rrb->pitch >> 6) + (x >> 4);
+
+ address |= (x & 0x7) << 2;
+ address |= (y & 0x3) << 5;
+ address |= (((x & 0x10) >> 2) ^ (y & 0x4)) << 5;
+ address |= (ba & 3) << 8;
+ address |= (y & 0x8) << 7;
+ address |= (((x & 0x8) << 1) ^ (y & 0x10)) << 7;
+ address |= (ba & ~0x3) << 10;
+ return address;
+}
+
+static GLuint get_depth_z16(const struct radeon_renderbuffer * rrb,
+ GLint x, GLint y)
+{
+ GLuint ba, address = 0; /* a[0] = 0 */
+
+ ba = (y / 16) * (rrb->pitch >> 6) + (x / 32);
+
+ address |= (x & 0x7) << 1; /* a[1..3] = x[0..2] */
+ address |= (y & 0x7) << 4; /* a[4..6] = y[0..2] */
+ address |= (x & 0x8) << 4; /* a[7] = x[3] */
+ address |= (ba & 0x3) << 8; /* a[8..9] = ba[0..1] */
+ address |= (y & 0x8) << 7; /* a[10] = y[3] */
+ address |= ((x & 0x10) ^ (y & 0x10)) << 7;/* a[11] = x[4] ^ y[4] */
+ address |= (ba & ~0x3) << 10; /* a[12..] = ba[2..] */
+ return address;
+}
+#endif
+
+#if defined(RADEON_R200)
+static GLuint get_depth_z32(const struct radeon_renderbuffer * rrb,
+ GLint x, GLint y)
+{
+ GLuint offset;
+ GLuint b;
+ offset = 0;
+ b = (((y & 0x7ff) >> 4) * (rrb->pitch >> 7) + (x >> 5));
+ offset += (b >> 1) << 12;
+ offset += (((rrb->pitch >> 7) & 0x1) ? (b & 0x1) : ((b & 0x1) ^ ((y >> 4) & 0x1))) << 11;
+ offset += ((y >> 2) & 0x3) << 9;
+ offset += ((x >> 2) & 0x1) << 8;
+ offset += ((x >> 3) & 0x3) << 6;
+ offset += ((y >> 1) & 0x1) << 5;
+ offset += ((x >> 1) & 0x1) << 4;
+ offset += (y & 0x1) << 3;
+ offset += (x & 0x1) << 2;
+
+ return offset;
+}
+
+static GLuint get_depth_z16(const struct radeon_renderbuffer *rrb,
+ GLint x, GLint y)
+{
+ GLuint offset;
+ GLuint b;
+
+ offset = 0;
+ b = (((y >> 4) * (rrb->pitch >> 7) + (x >> 6)));
+ offset += (b >> 1) << 12;
+ offset += (((rrb->pitch >> 7) & 0x1) ? (b & 0x1) : ((b & 0x1) ^ ((y >> 4) & 0x1))) << 11;
+ offset += ((y >> 2) & 0x3) << 9;
+ offset += ((x >> 3) & 0x1) << 8;
+ offset += ((x >> 4) & 0x3) << 6;
+ offset += ((x >> 2) & 0x1) << 5;
+ offset += ((y >> 1) & 0x1) << 4;
+ offset += ((x >> 1) & 0x1) << 3;
+ offset += (y & 0x1) << 2;
+ offset += (x & 0x1) << 1;
+
+ return offset;
+}
+#endif
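+
+/* The helpers above recover the byte offset of a pixel within the
+ * hardware's tiled depth-buffer layout: instead of the linear
+ * y * pitch + x * cpp, the coordinate bits are interleaved so that
+ * 2D-adjacent pixels stay close in memory.  The exact bit shuffle is
+ * ASIC-specific, hence the separate R100 and R200 implementations.
+ */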
+
+static void
+radeon_map_renderbuffer_s8z24(struct gl_context *ctx,
+ struct gl_renderbuffer *rb,
+ GLuint x, GLuint y, GLuint w, GLuint h,
+ GLbitfield mode,
+ GLubyte **out_map,
+ GLint *out_stride)
+{
+ struct radeon_renderbuffer *rrb = radeon_renderbuffer(rb);
+ uint32_t *untiled_s8z24_map, *tiled_s8z24_map;
+ int ret;
+ int y_flip = (rb->Name == 0) ? -1 : 1;
+ int y_bias = (rb->Name == 0) ? (rb->Height - 1) : 0;
+ uint32_t pitch = w * rrb->cpp;
+
+ rrb->map_pitch = pitch;
+
+ rrb->map_buffer = malloc(w * h * 4);
+ ret = radeon_bo_map(rrb->bo, !!(mode & GL_MAP_WRITE_BIT));
+ assert(!ret);
+ untiled_s8z24_map = rrb->map_buffer;
+ tiled_s8z24_map = rrb->bo->ptr;
+
+	for (uint32_t pix_y = 0; pix_y < h; ++pix_y) {
+ for (uint32_t pix_x = 0; pix_x < w; ++pix_x) {
+ uint32_t flipped_y = y_flip * (int32_t)(y + pix_y) + y_bias;
+ uint32_t src_offset = get_depth_z32(rrb, x + pix_x, flipped_y);
+ uint32_t dst_offset = pix_y * rrb->map_pitch + pix_x * rrb->cpp;
+ untiled_s8z24_map[dst_offset/4] = tiled_s8z24_map[src_offset/4];
+ }
+ }
+
+ radeon_bo_unmap(rrb->bo);
+
+ *out_map = rrb->map_buffer;
+ *out_stride = rrb->map_pitch;
+}
+
+static void
+radeon_map_renderbuffer_z16(struct gl_context *ctx,
+ struct gl_renderbuffer *rb,
+ GLuint x, GLuint y, GLuint w, GLuint h,
+ GLbitfield mode,
+ GLubyte **out_map,
+ GLint *out_stride)
+{
+ struct radeon_renderbuffer *rrb = radeon_renderbuffer(rb);
+ uint16_t *untiled_z16_map, *tiled_z16_map;
+ int ret;
+ int y_flip = (rb->Name == 0) ? -1 : 1;
+ int y_bias = (rb->Name == 0) ? (rb->Height - 1) : 0;
+ uint32_t pitch = w * rrb->cpp;
+
+ rrb->map_pitch = pitch;
+
+ rrb->map_buffer = malloc(w * h * 2);
+ ret = radeon_bo_map(rrb->bo, !!(mode & GL_MAP_WRITE_BIT));
+ assert(!ret);
+
+ untiled_z16_map = rrb->map_buffer;
+ tiled_z16_map = rrb->bo->ptr;
+
+	for (uint32_t pix_y = 0; pix_y < h; ++pix_y) {
+ for (uint32_t pix_x = 0; pix_x < w; ++pix_x) {
+ uint32_t flipped_y = y_flip * (int32_t)(y + pix_y) + y_bias;
+ uint32_t src_offset = get_depth_z16(rrb, x + pix_x, flipped_y);
+ uint32_t dst_offset = pix_y * rrb->map_pitch + pix_x * rrb->cpp;
+ untiled_z16_map[dst_offset/2] = tiled_z16_map[src_offset/2];
+ }
+ }
+
+ radeon_bo_unmap(rrb->bo);
+
+ *out_map = rrb->map_buffer;
+ *out_stride = rrb->map_pitch;
+}
+
+static void
+radeon_map_renderbuffer(struct gl_context *ctx,
+ struct gl_renderbuffer *rb,
+ GLuint x, GLuint y, GLuint w, GLuint h,
+ GLbitfield mode,
+ GLubyte **out_map,
+ GLint *out_stride)
+{
+ struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
+ struct radeon_renderbuffer *rrb = radeon_renderbuffer(rb);
+ GLubyte *map;
+ GLboolean ok;
+ int stride, flip_stride;
+ int ret;
+ int src_x, src_y;
+
+ if (!rrb || !rrb->bo) {
+ *out_map = NULL;
+ *out_stride = 0;
+ return;
+ }
+
+ rrb->map_mode = mode;
+ rrb->map_x = x;
+ rrb->map_y = y;
+ rrb->map_w = w;
+ rrb->map_h = h;
+ rrb->map_pitch = rrb->pitch;
+
+ ok = rmesa->vtbl.check_blit(rb->Format, rrb->pitch / rrb->cpp);
+ if (ok) {
+ if (rb->Name) {
+ src_x = x;
+ src_y = y;
+ } else {
+ src_x = x;
+ src_y = rrb->base.Base.Height - y - h;
+ }
+
+ /* Make a temporary buffer and blit the current contents of the renderbuffer
+ * out to it. This gives us linear access to the buffer, instead of having
+ * to do detiling in software.
+ */
+
+ rrb->map_pitch = rrb->pitch;
+
+ assert(!rrb->map_bo);
+ rrb->map_bo = radeon_bo_open(rmesa->radeonScreen->bom, 0,
+ rrb->map_pitch * h, 4,
+ RADEON_GEM_DOMAIN_GTT, 0);
+
+ ok = rmesa->vtbl.blit(ctx, rrb->bo, rrb->draw_offset,
+ rb->Format, rrb->pitch / rrb->cpp,
+ rb->Width, rb->Height,
+ src_x, src_y,
+ rrb->map_bo, 0,
+ rb->Format, rrb->map_pitch / rrb->cpp,
+ w, h,
+ 0, 0,
+ w, h,
+ GL_FALSE);
+ assert(ok);
+
+ ret = radeon_bo_map(rrb->map_bo, !!(mode & GL_MAP_WRITE_BIT));
+ assert(!ret);
+
+ map = rrb->map_bo->ptr;
+
+ if (rb->Name) {
+ *out_map = map;
+ *out_stride = rrb->map_pitch;
+ } else {
+ *out_map = map + (h - 1) * rrb->map_pitch;
+ *out_stride = -rrb->map_pitch;
+ }
+ return;
+ }
+
+	/* Software fallback: flush any rendering that still references this BO */
+ if (radeon_bo_is_referenced_by_cs(rrb->bo, rmesa->cmdbuf.cs)) {
+ radeon_firevertices(rmesa);
+ }
+
+ if ((rmesa->radeonScreen->chip_flags & RADEON_CHIPSET_DEPTH_ALWAYS_TILED) && !rrb->has_surface) {
+ if (rb->Format == MESA_FORMAT_S8_Z24 || rb->Format == MESA_FORMAT_X8_Z24) {
+ radeon_map_renderbuffer_s8z24(ctx, rb, x, y, w, h,
+ mode, out_map, out_stride);
+ return;
+ }
+ if (rb->Format == MESA_FORMAT_Z16) {
+ radeon_map_renderbuffer_z16(ctx, rb, x, y, w, h,
+ mode, out_map, out_stride);
+ return;
+ }
+ }
+
+ ret = radeon_bo_map(rrb->bo, !!(mode & GL_MAP_WRITE_BIT));
+ assert(!ret);
+
+ map = rrb->bo->ptr;
+ stride = rrb->map_pitch;
+
+ if (rb->Name == 0) {
+ y = rb->Height - 1 - y;
+ flip_stride = -stride;
+ } else {
+ flip_stride = stride;
+ map += rrb->draw_offset;
+ }
+
+ map += x * rrb->cpp;
+ map += (int)y * stride;
+
+ *out_map = map;
+ *out_stride = flip_stride;
+}
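+
+/* Summary of the mapping strategy above: prefer blitting into a linear
+ * temporary BO (no CPU detiling needed), fall back to CPU detiling for
+ * the always-tiled depth formats, and otherwise map the BO directly,
+ * using a negative stride for window-system buffers so the caller sees
+ * row 0 at the top.
+ */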
+
+static void
+radeon_unmap_renderbuffer_s8z24(struct gl_context *ctx,
+ struct gl_renderbuffer *rb)
+{
+ struct radeon_renderbuffer *rrb = radeon_renderbuffer(rb);
+
+ if (!rrb->map_buffer)
+ return;
+
+ if (rrb->map_mode & GL_MAP_WRITE_BIT) {
+ uint32_t *untiled_s8z24_map = rrb->map_buffer;
+ uint32_t *tiled_s8z24_map;
+ int y_flip = (rb->Name == 0) ? -1 : 1;
+ int y_bias = (rb->Name == 0) ? (rb->Height - 1) : 0;
+
+ radeon_bo_map(rrb->bo, 1);
+
+ tiled_s8z24_map = rrb->bo->ptr;
+
+ for (uint32_t pix_y = 0; pix_y < rrb->map_h; pix_y++) {
+ for (uint32_t pix_x = 0; pix_x < rrb->map_w; pix_x++) {
+ uint32_t flipped_y = y_flip * (int32_t)(pix_y + rrb->map_y) + y_bias;
+ uint32_t dst_offset = get_depth_z32(rrb, rrb->map_x + pix_x, flipped_y);
+ uint32_t src_offset = pix_y * rrb->map_pitch + pix_x * rrb->cpp;
+ tiled_s8z24_map[dst_offset/4] = untiled_s8z24_map[src_offset/4];
+ }
+ }
+ radeon_bo_unmap(rrb->bo);
+ }
+ free(rrb->map_buffer);
+ rrb->map_buffer = NULL;
+}
+
+static void
+radeon_unmap_renderbuffer_z16(struct gl_context *ctx,
+ struct gl_renderbuffer *rb)
+{
+ struct radeon_renderbuffer *rrb = radeon_renderbuffer(rb);
+
+ if (!rrb->map_buffer)
+ return;
+
+ if (rrb->map_mode & GL_MAP_WRITE_BIT) {
+ uint16_t *untiled_z16_map = rrb->map_buffer;
+ uint16_t *tiled_z16_map;
+ int y_flip = (rb->Name == 0) ? -1 : 1;
+ int y_bias = (rb->Name == 0) ? (rb->Height - 1) : 0;
+
+ radeon_bo_map(rrb->bo, 1);
+
+ tiled_z16_map = rrb->bo->ptr;
+
+ for (uint32_t pix_y = 0; pix_y < rrb->map_h; pix_y++) {
+ for (uint32_t pix_x = 0; pix_x < rrb->map_w; pix_x++) {
+ uint32_t flipped_y = y_flip * (int32_t)(pix_y + rrb->map_y) + y_bias;
+ uint32_t dst_offset = get_depth_z16(rrb, rrb->map_x + pix_x, flipped_y);
+ uint32_t src_offset = pix_y * rrb->map_pitch + pix_x * rrb->cpp;
+ tiled_z16_map[dst_offset/2] = untiled_z16_map[src_offset/2];
+ }
+ }
+ radeon_bo_unmap(rrb->bo);
+ }
+ free(rrb->map_buffer);
+ rrb->map_buffer = NULL;
+}
+
+
+static void
+radeon_unmap_renderbuffer(struct gl_context *ctx,
+ struct gl_renderbuffer *rb)
+{
+ struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
+ struct radeon_renderbuffer *rrb = radeon_renderbuffer(rb);
+ GLboolean ok;
+
+ if ((rmesa->radeonScreen->chip_flags & RADEON_CHIPSET_DEPTH_ALWAYS_TILED) && !rrb->has_surface) {
+ if (rb->Format == MESA_FORMAT_S8_Z24 || rb->Format == MESA_FORMAT_X8_Z24) {
+ radeon_unmap_renderbuffer_s8z24(ctx, rb);
+ return;
+ }
+ if (rb->Format == MESA_FORMAT_Z16) {
+ radeon_unmap_renderbuffer_z16(ctx, rb);
+ return;
+ }
+ }
+
+ if (!rrb->map_bo) {
+ if (rrb->bo)
+ radeon_bo_unmap(rrb->bo);
+ return;
+ }
+
+ radeon_bo_unmap(rrb->map_bo);
+
+ if (rrb->map_mode & GL_MAP_WRITE_BIT) {
+ ok = rmesa->vtbl.blit(ctx, rrb->map_bo, 0,
+ rb->Format, rrb->map_pitch / rrb->cpp,
+ rrb->map_w, rrb->map_h,
+ 0, 0,
+ rrb->bo, rrb->draw_offset,
+ rb->Format, rrb->pitch / rrb->cpp,
+ rb->Width, rb->Height,
+ rrb->map_x, rrb->map_y,
+ rrb->map_w, rrb->map_h,
+ GL_FALSE);
+ assert(ok);
+ }
+
+ radeon_bo_unref(rrb->map_bo);
+ rrb->map_bo = NULL;
+}
+
+
+/**
+ * Called via glRenderbufferStorageEXT() to set the format and allocate
+ * storage for a user-created renderbuffer.
+ */
+static GLboolean
+radeon_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
+ GLenum internalFormat,
+ GLuint width, GLuint height)
+{
+ struct radeon_context *radeon = RADEON_CONTEXT(ctx);
+ struct radeon_renderbuffer *rrb = radeon_renderbuffer(rb);
+ uint32_t size, pitch;
+ int cpp;
+
+ radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+ "%s(%p, rb %p) \n",
+ __func__, ctx, rb);
+
+ ASSERT(rb->Name != 0);
+ switch (internalFormat) {
+ case GL_R3_G3_B2:
+ case GL_RGB4:
+ case GL_RGB5:
+ rb->Format = _radeon_texformat_rgb565;
+ cpp = 2;
+ break;
+ case GL_RGB:
+ case GL_RGB8:
+ case GL_RGB10:
+ case GL_RGB12:
+ case GL_RGB16:
+ rb->Format = _radeon_texformat_argb8888;
+ cpp = 4;
+ break;
+ case GL_RGBA:
+ case GL_RGBA2:
+ case GL_RGBA4:
+ case GL_RGB5_A1:
+ case GL_RGBA8:
+ case GL_RGB10_A2:
+ case GL_RGBA12:
+ case GL_RGBA16:
+ rb->Format = _radeon_texformat_argb8888;
+ cpp = 4;
+ break;
+ case GL_STENCIL_INDEX:
+ case GL_STENCIL_INDEX1_EXT:
+ case GL_STENCIL_INDEX4_EXT:
+ case GL_STENCIL_INDEX8_EXT:
+ case GL_STENCIL_INDEX16_EXT:
+ /* alloc a depth+stencil buffer */
+ rb->Format = MESA_FORMAT_S8_Z24;
+ cpp = 4;
+ break;
+ case GL_DEPTH_COMPONENT16:
+ rb->Format = MESA_FORMAT_Z16;
+ cpp = 2;
+ break;
+ case GL_DEPTH_COMPONENT:
+ case GL_DEPTH_COMPONENT24:
+ case GL_DEPTH_COMPONENT32:
+ rb->Format = MESA_FORMAT_X8_Z24;
+ cpp = 4;
+ break;
+ case GL_DEPTH_STENCIL_EXT:
+ case GL_DEPTH24_STENCIL8_EXT:
+ rb->Format = MESA_FORMAT_S8_Z24;
+ cpp = 4;
+ break;
+ default:
+ _mesa_problem(ctx,
+ "Unexpected format in radeon_alloc_renderbuffer_storage");
+ return GL_FALSE;
+ }
+
+ rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);
+
+ if (ctx->Driver.Flush)
+ ctx->Driver.Flush(ctx); /* +r6/r7 */
+
+ if (rrb->bo)
+ radeon_bo_unref(rrb->bo);
+
+ pitch = ((cpp * width + 63) & ~63) / cpp;
+
+ if (RADEON_DEBUG & RADEON_MEMORY)
+ fprintf(stderr,"Allocating %d x %d radeon RBO (pitch %d)\n", width,
+ height, pitch);
+
+ size = pitch * height * cpp;
+ rrb->pitch = pitch * cpp;
+ rrb->cpp = cpp;
+ rrb->bo = radeon_bo_open(radeon->radeonScreen->bom,
+ 0,
+ size,
+ 0,
+ RADEON_GEM_DOMAIN_VRAM,
+ 0);
+ rb->Width = width;
+ rb->Height = height;
+ return GL_TRUE;
+}
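+
+/* Worked example of the pitch computation above (illustrative numbers):
+ * for a 100-pixel-wide RGBA8 renderbuffer (cpp = 4),
+ * ((4 * 100 + 63) & ~63) / 4 = 448 / 4 = 112 pixels per row, i.e. each
+ * row is padded so that its byte length is a multiple of 64.
+ */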
+
+#if FEATURE_OES_EGL_image
+static void
+radeon_image_target_renderbuffer_storage(struct gl_context *ctx,
+ struct gl_renderbuffer *rb,
+ void *image_handle)
+{
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+ struct radeon_renderbuffer *rrb;
+ __DRIscreen *screen;
+ __DRIimage *image;
+
+ screen = radeon->radeonScreen->driScreen;
+ image = screen->dri2.image->lookupEGLImage(screen, image_handle,
+ screen->loaderPrivate);
+ if (image == NULL)
+ return;
+
+ rrb = radeon_renderbuffer(rb);
+
+ if (ctx->Driver.Flush)
+ ctx->Driver.Flush(ctx); /* +r6/r7 */
+
+ if (rrb->bo)
+ radeon_bo_unref(rrb->bo);
+ rrb->bo = image->bo;
+ radeon_bo_ref(rrb->bo);
+ fprintf(stderr, "image->bo: %p, name: %d, rbs: w %d -> p %d\n", image->bo, image->bo->handle,
+ image->width, image->pitch);
+
+ rrb->cpp = image->cpp;
+ rrb->pitch = image->pitch * image->cpp;
+
+   rb->Format = image->format;
+   rb->InternalFormat = image->internal_format;
+   rb->Width = image->width;
+   rb->Height = image->height;
+ rb->_BaseFormat = _mesa_base_fbo_format(radeon->glCtx,
+ image->internal_format);
+}
+#endif
+
+/**
+ * Called for each hardware renderbuffer when a _window_ is resized.
+ * Just update fields.
+ * Not used for user-created renderbuffers!
+ */
+static GLboolean
+radeon_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
+ GLenum internalFormat, GLuint width, GLuint height)
+{
+ ASSERT(rb->Name == 0);
+ rb->Width = width;
+ rb->Height = height;
+ rb->InternalFormat = internalFormat;
+ radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+ "%s(%p, rb %p) \n",
+ __func__, ctx, rb);
+
+
+ return GL_TRUE;
+}
+
+
+static void
+radeon_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb,
+ GLuint width, GLuint height)
+{
+ struct radeon_framebuffer *radeon_fb = (struct radeon_framebuffer*)fb;
+ int i;
+
+ radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+ "%s(%p, fb %p) \n",
+ __func__, ctx, fb);
+
+ _mesa_resize_framebuffer(ctx, fb, width, height);
+
+ fb->Initialized = GL_TRUE; /* XXX remove someday */
+
+ if (fb->Name != 0) {
+ return;
+ }
+
+ /* Make sure all window system renderbuffers are up to date */
+ for (i = 0; i < 2; i++) {
+ struct gl_renderbuffer *rb = &radeon_fb->color_rb[i]->base.Base;
+
+ /* only resize if size is changing */
+ if (rb && (rb->Width != width || rb->Height != height)) {
+ rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height);
+ }
+ }
+}
+
+
+/** Dummy function for gl_renderbuffer::AllocStorage() */
+static GLboolean
+radeon_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
+ GLenum internalFormat, GLuint width, GLuint height)
+{
+   _mesa_problem(ctx, "radeon_nop_alloc_storage should never be called.");
+ return GL_FALSE;
+}
+
+
+/**
+ * Create a renderbuffer for a window's color, depth and/or stencil buffer.
+ * Not used for user-created renderbuffers.
+ */
+struct radeon_renderbuffer *
+radeon_create_renderbuffer(gl_format format, __DRIdrawable *driDrawPriv)
+{
+ struct radeon_renderbuffer *rrb;
+ struct gl_renderbuffer *rb;
+
+ rrb = CALLOC_STRUCT(radeon_renderbuffer);
+
+ radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+ "%s( rrb %p ) \n",
+ __func__, rrb);
+
+ if (!rrb)
+ return NULL;
+
+ rb = &rrb->base.Base;
+
+ _mesa_init_renderbuffer(rb, 0);
+ rb->ClassID = RADEON_RB_CLASS;
+ rb->Format = format;
+ rb->_BaseFormat = _mesa_get_format_base_format(format);
+ rb->InternalFormat = _mesa_get_format_base_format(format);
+
+ rrb->dPriv = driDrawPriv;
+
+ rb->Delete = radeon_delete_renderbuffer;
+ rb->AllocStorage = radeon_alloc_window_storage;
+
+ rrb->bo = NULL;
+ return rrb;
+}
+
+static struct gl_renderbuffer *
+radeon_new_renderbuffer(struct gl_context * ctx, GLuint name)
+{
+ struct radeon_renderbuffer *rrb;
+ struct gl_renderbuffer *rb;
+
+
+ rrb = CALLOC_STRUCT(radeon_renderbuffer);
+
+ radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+ "%s(%p, rrb %p) \n",
+ __func__, ctx, rrb);
+
+ if (!rrb)
+ return NULL;
+
+ rb = &rrb->base.Base;
+
+ _mesa_init_renderbuffer(rb, name);
+ rb->ClassID = RADEON_RB_CLASS;
+ rb->Delete = radeon_delete_renderbuffer;
+ rb->AllocStorage = radeon_alloc_renderbuffer_storage;
+
+ return rb;
+}
+
+static void
+radeon_bind_framebuffer(struct gl_context * ctx, GLenum target,
+ struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
+{
+ radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+ "%s(%p, fb %p, target %s) \n",
+ __func__, ctx, fb,
+ _mesa_lookup_enum_by_nr(target));
+
+ if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
+ radeon_draw_buffer(ctx, fb);
+ }
+ else {
+ /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
+ }
+}
+
+static void
+radeon_framebuffer_renderbuffer(struct gl_context * ctx,
+ struct gl_framebuffer *fb,
+ GLenum attachment, struct gl_renderbuffer *rb)
+{
+
+ if (ctx->Driver.Flush)
+ ctx->Driver.Flush(ctx); /* +r6/r7 */
+
+ radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+ "%s(%p, fb %p, rb %p) \n",
+ __func__, ctx, fb, rb);
+
+ _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
+ radeon_draw_buffer(ctx, fb);
+}
+
+static GLboolean
+radeon_update_wrapper(struct gl_context *ctx, struct radeon_renderbuffer *rrb,
+ struct gl_texture_image *texImage)
+{
+ struct gl_renderbuffer *rb = &rrb->base.Base;
+
+ radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+ "%s(%p, rrb %p, texImage %p, texFormat %s) \n",
+ __func__, ctx, rrb, texImage, _mesa_get_format_name(texImage->TexFormat));
+
+ rrb->cpp = _mesa_get_format_bytes(texImage->TexFormat);
+ rrb->pitch = texImage->Width * rrb->cpp;
+ rb->Format = texImage->TexFormat;
+ rb->InternalFormat = texImage->InternalFormat;
+ rb->_BaseFormat = _mesa_base_fbo_format(ctx, rb->InternalFormat);
+ rb->Width = texImage->Width;
+ rb->Height = texImage->Height;
+ rb->Delete = radeon_delete_renderbuffer;
+ rb->AllocStorage = radeon_nop_alloc_storage;
+
+ return GL_TRUE;
+}
+
+
+static struct radeon_renderbuffer *
+radeon_wrap_texture(struct gl_context * ctx, struct gl_texture_image *texImage)
+{
+ const GLuint name = ~0; /* not significant, but distinct for debugging */
+ struct radeon_renderbuffer *rrb;
+
+   /* make a radeon_renderbuffer to wrap the texture image */
+ rrb = CALLOC_STRUCT(radeon_renderbuffer);
+
+ radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+ "%s(%p, rrb %p, texImage %p) \n",
+ __func__, ctx, rrb, texImage);
+
+ if (!rrb) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "glFramebufferTexture");
+ return NULL;
+ }
+
+ _mesa_init_renderbuffer(&rrb->base.Base, name);
+ rrb->base.Base.ClassID = RADEON_RB_CLASS;
+
+ if (!radeon_update_wrapper(ctx, rrb, texImage)) {
+ free(rrb);
+ return NULL;
+ }
+
+ return rrb;
+}
+
+static void
+radeon_render_texture(struct gl_context * ctx,
+ struct gl_framebuffer *fb,
+ struct gl_renderbuffer_attachment *att)
+{
+ struct gl_texture_image *newImage
+ = att->Texture->Image[att->CubeMapFace][att->TextureLevel];
+ struct radeon_renderbuffer *rrb = radeon_renderbuffer(att->Renderbuffer);
+ radeon_texture_image *radeon_image;
+ GLuint imageOffset;
+
+ radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+ "%s(%p, fb %p, rrb %p, att %p)\n",
+ __func__, ctx, fb, rrb, att);
+
+ (void) fb;
+
+ ASSERT(newImage);
+
+ radeon_image = (radeon_texture_image *)newImage;
+
+ if (!radeon_image->mt) {
+ /* Fallback on drawing to a texture without a miptree.
+ */
+ _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
+ _swrast_render_texture(ctx, fb, att);
+ return;
+ }
+ else if (!rrb) {
+ rrb = radeon_wrap_texture(ctx, newImage);
+ if (rrb) {
+ /* bind the wrapper to the attachment point */
+ _mesa_reference_renderbuffer(&att->Renderbuffer, &rrb->base.Base);
+ }
+ else {
+ /* fallback to software rendering */
+ _swrast_render_texture(ctx, fb, att);
+ return;
+ }
+ }
+
+ if (!radeon_update_wrapper(ctx, rrb, newImage)) {
+ _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
+ _swrast_render_texture(ctx, fb, att);
+ return;
+ }
+
+ DBG("Begin render texture tid %lx tex=%u w=%d h=%d refcount=%d\n",
+ _glthread_GetID(),
+ att->Texture->Name, newImage->Width, newImage->Height,
+ rrb->base.Base.RefCount);
+
+   /* point the renderbuffer's region to the texture image region */
+ if (rrb->bo != radeon_image->mt->bo) {
+ if (rrb->bo)
+ radeon_bo_unref(rrb->bo);
+ rrb->bo = radeon_image->mt->bo;
+ radeon_bo_ref(rrb->bo);
+ }
+
+ /* compute offset of the particular 2D image within the texture region */
+ imageOffset = radeon_miptree_image_offset(radeon_image->mt,
+ att->CubeMapFace,
+ att->TextureLevel);
+
+ if (att->Texture->Target == GL_TEXTURE_3D) {
+ imageOffset += radeon_image->mt->levels[att->TextureLevel].rowstride *
+ radeon_image->mt->levels[att->TextureLevel].height *
+ att->Zoffset;
+ }
+
+ /* store that offset in the region, along with the correct pitch for
+ * the image we are rendering to */
+ rrb->draw_offset = imageOffset;
+ rrb->pitch = radeon_image->mt->levels[att->TextureLevel].rowstride;
+ radeon_image->used_as_render_target = GL_TRUE;
+
+ /* update drawing region, etc */
+ radeon_draw_buffer(ctx, fb);
+}
+
+static void
+radeon_finish_render_texture(struct gl_context * ctx,
+ struct gl_renderbuffer_attachment *att)
+{
+ struct gl_texture_object *tex_obj = att->Texture;
+ struct gl_texture_image *image =
+ tex_obj->Image[att->CubeMapFace][att->TextureLevel];
+ radeon_texture_image *radeon_image = (radeon_texture_image *)image;
+
+ if (radeon_image)
+ radeon_image->used_as_render_target = GL_FALSE;
+
+ if (ctx->Driver.Flush)
+ ctx->Driver.Flush(ctx); /* +r6/r7 */
+}
+
+static void
+radeon_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
+{
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+ gl_format mesa_format;
+ int i;
+
+ for (i = -2; i < (GLint) ctx->Const.MaxColorAttachments; i++) {
+ struct gl_renderbuffer_attachment *att;
+ if (i == -2) {
+ att = &fb->Attachment[BUFFER_DEPTH];
+ } else if (i == -1) {
+ att = &fb->Attachment[BUFFER_STENCIL];
+ } else {
+ att = &fb->Attachment[BUFFER_COLOR0 + i];
+ }
+
+ if (att->Type == GL_TEXTURE) {
+ mesa_format = att->Texture->Image[att->CubeMapFace][att->TextureLevel]->TexFormat;
+ } else {
+			/* All renderbuffer formats are renderable, but not all are samplable */
+ continue;
+ }
+
+ if (!radeon->vtbl.is_format_renderable(mesa_format)){
+ fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED;
+ radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+ "%s: HW doesn't support format %s as output format of attachment %d\n",
+ __FUNCTION__, _mesa_get_format_name(mesa_format), i);
+ return;
+ }
+ }
+}
+
+void radeon_fbo_init(struct radeon_context *radeon)
+{
+#if FEATURE_EXT_framebuffer_object
+ radeon->glCtx->Driver.NewFramebuffer = radeon_new_framebuffer;
+ radeon->glCtx->Driver.NewRenderbuffer = radeon_new_renderbuffer;
+ radeon->glCtx->Driver.MapRenderbuffer = radeon_map_renderbuffer;
+ radeon->glCtx->Driver.UnmapRenderbuffer = radeon_unmap_renderbuffer;
+ radeon->glCtx->Driver.BindFramebuffer = radeon_bind_framebuffer;
+ radeon->glCtx->Driver.FramebufferRenderbuffer = radeon_framebuffer_renderbuffer;
+ radeon->glCtx->Driver.RenderTexture = radeon_render_texture;
+ radeon->glCtx->Driver.FinishRenderTexture = radeon_finish_render_texture;
+ radeon->glCtx->Driver.ResizeBuffers = radeon_resize_buffers;
+ radeon->glCtx->Driver.ValidateFramebuffer = radeon_validate_framebuffer;
+#endif
+#if FEATURE_EXT_framebuffer_blit
+ radeon->glCtx->Driver.BlitFramebuffer = _mesa_meta_BlitFramebuffer;
+#endif
+#if FEATURE_OES_EGL_image
+ radeon->glCtx->Driver.EGLImageTargetRenderbufferStorage =
+ radeon_image_target_renderbuffer_storage;
+#endif
+}
+
+
+void radeon_renderbuffer_set_bo(struct radeon_renderbuffer *rb,
+ struct radeon_bo *bo)
+{
+ struct radeon_bo *old;
+ old = rb->bo;
+ rb->bo = bo;
+ radeon_bo_ref(bo);
+ if (old)
+ radeon_bo_unref(old);
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_fog.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_fog.c
index 231aa4ffcb3..bd2642b1304 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_fog.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_fog.c
@@ -1 +1,125 @@
-../radeon/radeon_fog.c \ No newline at end of file
+/**************************************************************************
+
+Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
+ Tungsten Graphics Inc., Austin, Texas.
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+/*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "main/glheader.h"
+#include "main/imports.h"
+#include "main/context.h"
+#include "main/mtypes.h"
+#include "main/enums.h"
+#include "main/macros.h"
+
+#include "radeon_fog.h"
+
+/**********************************************************************/
+/* Fog blend factor computation for hw tcl */
+/* same calculation used as in t_vb_fog.c */
+/**********************************************************************/
+
+#define FOG_EXP_TABLE_SIZE 256
+#define FOG_MAX (10.0)
+#define EXP_FOG_MAX .0006595
+#define FOG_INCR (FOG_MAX/FOG_EXP_TABLE_SIZE)
+static GLfloat exp_table[FOG_EXP_TABLE_SIZE];
+
+#if 1
+#define NEG_EXP( result, narg ) \
+do { \
+ GLfloat f = (GLfloat) (narg * (1.0/FOG_INCR)); \
+ GLint k = (GLint) f; \
+ if (k > FOG_EXP_TABLE_SIZE-2) \
+ result = (GLfloat) EXP_FOG_MAX; \
+ else \
+ result = exp_table[k] + (f-k)*(exp_table[k+1]-exp_table[k]); \
+} while (0)
+#else
+#define NEG_EXP( result, narg ) \
+do { \
+ result = exp(-narg); \
+} while (0)
+#endif
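+
+/* Worked example of the table lookup above (illustrative numbers): with
+ * FOG_INCR = 10.0/256, narg = 1.0 gives f = 25.6 and k = 25, so the
+ * result interpolates 60% of the way from exp_table[25] (= exp(-0.9765625))
+ * toward exp_table[26], closely approximating exp(-1.0).
+ */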
+
+
+/**
+ * Initialize the exp_table[] lookup table for approximating exp().
+ */
+void
+radeonInitStaticFogData( void )
+{
+ GLfloat f = 0.0F;
+ GLint i = 0;
+ for ( ; i < FOG_EXP_TABLE_SIZE ; i++, f += FOG_INCR) {
+ exp_table[i] = (GLfloat) exp(-f);
+ }
+}
+
+/**
+ * Compute per-vertex fog blend factors from fog coordinates by
+ * evaluating the GL_LINEAR, GL_EXP or GL_EXP2 fog function.
+ * Fog coordinates are distances from the eye (typically between the
+ * near and far clip plane distances).
+ * Note the fog (eye Z) coords may be negative so we use ABS(z) below.
+ * Fog blend factors are in the range [0,1].
+ */
+float
+radeonComputeFogBlendFactor( struct gl_context *ctx, GLfloat fogcoord )
+{
+ GLfloat end = ctx->Fog.End;
+ GLfloat d, temp;
+ const GLfloat z = FABSF(fogcoord);
+
+ switch (ctx->Fog.Mode) {
+ case GL_LINEAR:
+ if (ctx->Fog.Start == ctx->Fog.End)
+ d = 1.0F;
+ else
+ d = 1.0F / (ctx->Fog.End - ctx->Fog.Start);
+ temp = (end - z) * d;
+ return CLAMP(temp, 0.0F, 1.0F);
+	case GL_EXP:
+		d = ctx->Fog.Density;
+		NEG_EXP( temp, d * z );
+		return temp;
+	case GL_EXP2:
+		d = ctx->Fog.Density*ctx->Fog.Density;
+		NEG_EXP( temp, d * z * z );
+		return temp;
+	default:
+		_mesa_problem(ctx, "Bad fog mode in radeonComputeFogBlendFactor");
+ return 0;
+ }
+}
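+
+/* Worked example (illustrative numbers): in GL_LINEAR mode with
+ * Fog.Start = 1.0 and Fog.End = 10.0, a fog coordinate of 5.5 yields
+ * (10.0 - 5.5) / (10.0 - 1.0) = 0.5, the half-fogged blend factor.
+ */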
+
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_fog.h b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_fog.h
index 76c40f621f3..0c3b8d5166b 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_fog.h
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_fog.h
@@ -1 +1,44 @@
-../radeon/radeon_fog.h \ No newline at end of file
+/**************************************************************************
+
+Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
+ Tungsten Graphics Inc., Austin, Texas.
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+/*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef RADEON_FOG_H
+#define RADEON_FOG_H
+
+void
+radeonInitStaticFogData( void );
+
+float
+radeonComputeFogBlendFactor( struct gl_context *ctx, GLfloat fogcoord );
+
+#endif
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_mipmap_tree.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_mipmap_tree.c
index 31c0cfbe942..f4fb3a55a5d 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_mipmap_tree.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_mipmap_tree.c
@@ -1 +1,610 @@
-../radeon/radeon_mipmap_tree.c \ No newline at end of file
+/*
+ * Copyright (C) 2009 Maciej Cencora.
+ * Copyright (C) 2008 Nicolai Haehnle.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "radeon_mipmap_tree.h"
+
+#include <errno.h>
+#include <unistd.h>
+
+#include "main/simple_list.h"
+#include "main/teximage.h"
+#include "main/texobj.h"
+#include "main/enums.h"
+#include "radeon_texture.h"
+#include "radeon_tile.h"
+
+static unsigned get_aligned_compressed_row_stride(
+ gl_format format,
+ unsigned width,
+ unsigned minStride)
+{
+ const unsigned blockBytes = _mesa_get_format_bytes(format);
+ unsigned blockWidth, blockHeight;
+ unsigned stride;
+
+ _mesa_get_format_block_size(format, &blockWidth, &blockHeight);
+
+	/* Count the number of blocks required to store the given width,
+	 * then multiply by the bytes required to store one block.
+	 */
+ stride = (width + blockWidth - 1) / blockWidth * blockBytes;
+
+ /* Round the given minimum stride to the next full blocksize.
+ * (minStride + blockBytes - 1) / blockBytes * blockBytes
+ */
+ if ( stride < minStride )
+ stride = (minStride + blockBytes - 1) / blockBytes * blockBytes;
+
+ radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+ "%s width %u, minStride %u, block(bytes %u, width %u):"
+ "stride %u\n",
+ __func__, width, minStride,
+ blockBytes, blockWidth,
+ stride);
+
+ return stride;
+}
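+
+/* Worked example (illustrative numbers): for a DXT1 texture (4x4 blocks,
+ * 8 bytes per block) of width 16, (16 + 3) / 4 * 8 = 32 bytes per row of
+ * blocks; a minStride of 36 would be rounded up to the next full block
+ * size, giving 40.
+ */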
+
+unsigned get_texture_image_size(
+ gl_format format,
+ unsigned rowStride,
+ unsigned height,
+ unsigned depth,
+ unsigned tiling)
+{
+ if (_mesa_is_format_compressed(format)) {
+ unsigned blockWidth, blockHeight;
+
+ _mesa_get_format_block_size(format, &blockWidth, &blockHeight);
+
+ return rowStride * ((height + blockHeight - 1) / blockHeight) * depth;
+ } else if (tiling) {
+ /* Need to align height to tile height */
+ unsigned tileWidth, tileHeight;
+
+ get_tile_size(format, &tileWidth, &tileHeight);
+ tileHeight--;
+
+ height = (height + tileHeight) & ~tileHeight;
+ }
+
+ return rowStride * height * depth;
+}
+
+unsigned get_texture_image_row_stride(radeonContextPtr rmesa, gl_format format, unsigned width, unsigned tiling, GLuint target)
+{
+ if (_mesa_is_format_compressed(format)) {
+ return get_aligned_compressed_row_stride(format, width, rmesa->texture_compressed_row_align);
+ } else {
+ unsigned row_align;
+
+ if (!_mesa_is_pow_two(width) || target == GL_TEXTURE_RECTANGLE) {
+ row_align = rmesa->texture_rect_row_align - 1;
+ } else if (tiling) {
+ unsigned tileWidth, tileHeight;
+ get_tile_size(format, &tileWidth, &tileHeight);
+ row_align = tileWidth * _mesa_get_format_bytes(format) - 1;
+ } else {
+ row_align = rmesa->texture_row_align - 1;
+ }
+
+ return (_mesa_format_row_stride(format, width) + row_align) & ~row_align;
+ }
+}
+
+/**
+ * Compute sizes and fill in offset and blit information for the given
+ * image (determined by \p face and \p level).
+ *
+ * \param curOffset points to the offset at which the image is to be stored
+ * and is updated by this function according to the size of the image.
+ */
+static void compute_tex_image_offset(radeonContextPtr rmesa, radeon_mipmap_tree *mt,
+ GLuint face, GLuint level, GLuint* curOffset)
+{
+ radeon_mipmap_level *lvl = &mt->levels[level];
+ GLuint height;
+
+ height = _mesa_next_pow_two_32(lvl->height);
+
+ lvl->rowstride = get_texture_image_row_stride(rmesa, mt->mesaFormat, lvl->width, mt->tilebits, mt->target);
+ lvl->size = get_texture_image_size(mt->mesaFormat, lvl->rowstride, height, lvl->depth, mt->tilebits);
+
+ assert(lvl->size > 0);
+
+ lvl->faces[face].offset = *curOffset;
+ *curOffset += lvl->size;
+
+ radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+ "%s(%p) level %d, face %d: rs:%d %dx%d at %d\n",
+ __func__, rmesa,
+ level, face,
+ lvl->rowstride, lvl->width, height, lvl->faces[face].offset);
+}
+
+static GLuint minify(GLuint size, GLuint levels)
+{
+ size = size >> levels;
+ if (size < 1)
+ size = 1;
+ return size;
+}
+
+
+static void calculate_miptree_layout(radeonContextPtr rmesa, radeon_mipmap_tree *mt)
+{
+ GLuint curOffset, i, face, level;
+
+ assert(mt->numLevels <= rmesa->glCtx->Const.MaxTextureLevels);
+
+ curOffset = 0;
+ for(face = 0; face < mt->faces; face++) {
+
+ for(i = 0, level = mt->baseLevel; i < mt->numLevels; i++, level++) {
+ mt->levels[level].valid = 1;
+ mt->levels[level].width = minify(mt->width0, i);
+ mt->levels[level].height = minify(mt->height0, i);
+ mt->levels[level].depth = minify(mt->depth0, i);
+ compute_tex_image_offset(rmesa, mt, face, level, &curOffset);
+ }
+ }
+
+ /* Note the required size in memory */
+ mt->totalsize = (curOffset + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK;
+
+ radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+ "%s(%p, %p) total size %d\n",
+ __func__, rmesa, mt, mt->totalsize);
+}
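+
+/* Layout produced above (summary of existing behavior): images are laid
+ * out face-major, then level-major, each image at the next free offset,
+ * so e.g. a cube map keeps the full mip chain of each face contiguous;
+ * the total size is then rounded up via RADEON_OFFSET_MASK.
+ */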
+
+/**
+ * Create a new mipmap tree, calculate its layout and allocate memory.
+ */
+radeon_mipmap_tree* radeon_miptree_create(radeonContextPtr rmesa,
+ GLenum target, gl_format mesaFormat, GLuint baseLevel, GLuint numLevels,
+ GLuint width0, GLuint height0, GLuint depth0, GLuint tilebits)
+{
+ radeon_mipmap_tree *mt = CALLOC_STRUCT(_radeon_mipmap_tree);
+
+ radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
+ "%s(%p) new tree is %p.\n",
+ __func__, rmesa, mt);
+
+ mt->mesaFormat = mesaFormat;
+ mt->refcount = 1;
+ mt->target = target;
+ mt->faces = _mesa_num_tex_faces(target);
+ mt->baseLevel = baseLevel;
+ mt->numLevels = numLevels;
+ mt->width0 = width0;
+ mt->height0 = height0;
+ mt->depth0 = depth0;
+ mt->tilebits = tilebits;
+
+ calculate_miptree_layout(rmesa, mt);
+
+ mt->bo = radeon_bo_open(rmesa->radeonScreen->bom,
+ 0, mt->totalsize, 1024,
+ RADEON_GEM_DOMAIN_VRAM,
+ 0);
+
+ return mt;
+}
+
+void radeon_miptree_reference(radeon_mipmap_tree *mt, radeon_mipmap_tree **ptr)
+{
+ assert(!*ptr);
+
+ mt->refcount++;
+ assert(mt->refcount > 0);
+
+ *ptr = mt;
+}
+
+void radeon_miptree_unreference(radeon_mipmap_tree **ptr)
+{
+ radeon_mipmap_tree *mt = *ptr;
+ if (!mt)
+ return;
+
+ assert(mt->refcount > 0);
+
+ mt->refcount--;
+ if (!mt->refcount) {
+ radeon_bo_unref(mt->bo);
+ free(mt);
+ }
+
+ *ptr = 0;
+}
+
+/**
+ * Calculate min and max LOD for the given texture object.
+ * @param[in] tObj texture object whose LOD values to calculate
+ * @param[out] pminLod minimal LOD
+ * @param[out] pmaxLod maximal LOD
+ */
+static void calculate_min_max_lod(struct gl_sampler_object *samp, struct gl_texture_object *tObj,
+ unsigned *pminLod, unsigned *pmaxLod)
+{
+ int minLod, maxLod;
+ /* Yes, this looks overly complicated, but it's all needed.
+ */
+ switch (tObj->Target) {
+ case GL_TEXTURE_1D:
+ case GL_TEXTURE_2D:
+ case GL_TEXTURE_3D:
+ case GL_TEXTURE_CUBE_MAP:
+ if (samp->MinFilter == GL_NEAREST || samp->MinFilter == GL_LINEAR) {
+ /* GL_NEAREST and GL_LINEAR only care about GL_TEXTURE_BASE_LEVEL.
+ */
+ minLod = maxLod = tObj->BaseLevel;
+ } else {
+ minLod = tObj->BaseLevel + (GLint)(samp->MinLod);
+ minLod = MAX2(minLod, tObj->BaseLevel);
+ minLod = MIN2(minLod, tObj->MaxLevel);
+ maxLod = tObj->BaseLevel + (GLint)(samp->MaxLod + 0.5);
+ maxLod = MIN2(maxLod, tObj->MaxLevel);
+ maxLod = MIN2(maxLod, tObj->Image[0][minLod]->MaxNumLevels - 1 + minLod);
+ maxLod = MAX2(maxLod, minLod); /* need at least one level */
+ }
+ break;
+ case GL_TEXTURE_RECTANGLE_NV:
+ case GL_TEXTURE_4D_SGIS:
+ minLod = maxLod = 0;
+ break;
+ default:
+ return;
+ }
+
+ radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+ "%s(%p) target %s, min %d, max %d.\n",
+ __func__, tObj,
+ _mesa_lookup_enum_by_nr(tObj->Target),
+ minLod, maxLod);
+
+ /* save these values */
+ *pminLod = minLod;
+ *pmaxLod = maxLod;
+}
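+
+/* Example of the clamping above (illustrative numbers): BaseLevel = 2,
+ * MinLod = 1.0, MaxLod = 4.0 and MaxLevel = 5 give minLod = 2 + 1 = 3,
+ * while maxLod starts at 2 + 4 = 6 and is clamped to MaxLevel, giving 5
+ * (further limited by the base image's MaxNumLevels).
+ */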
+
+/**
+ * Checks whether the given miptree can hold the given texture image at the
+ * given face and level.
+ */
+GLboolean radeon_miptree_matches_image(radeon_mipmap_tree *mt,
+ struct gl_texture_image *texImage)
+{
+ radeon_mipmap_level *lvl;
+ GLuint level = texImage->Level;
+ if (texImage->TexFormat != mt->mesaFormat)
+ return GL_FALSE;
+
+ lvl = &mt->levels[level];
+ if (!lvl->valid ||
+ lvl->width != texImage->Width ||
+ lvl->height != texImage->Height ||
+ lvl->depth != texImage->Depth)
+ return GL_FALSE;
+
+ return GL_TRUE;
+}
+
+/**
+ * Checks whether the given miptree has the right format to store the given texture object.
+ */
+static GLboolean radeon_miptree_matches_texture(radeon_mipmap_tree *mt, struct gl_texture_object *texObj)
+{
+ struct gl_texture_image *firstImage;
+ unsigned numLevels;
+ radeon_mipmap_level *mtBaseLevel;
+
+ if (texObj->BaseLevel < mt->baseLevel)
+ return GL_FALSE;
+
+ mtBaseLevel = &mt->levels[texObj->BaseLevel - mt->baseLevel];
+ firstImage = texObj->Image[0][texObj->BaseLevel];
+ numLevels = MIN2(texObj->_MaxLevel - texObj->BaseLevel + 1, firstImage->MaxNumLevels);
+
+ if (radeon_is_debug_enabled(RADEON_TEXTURE,RADEON_TRACE)) {
+ fprintf(stderr, "Checking if miptree %p matches texObj %p\n", mt, texObj);
+ fprintf(stderr, "target %d vs %d\n", mt->target, texObj->Target);
+ fprintf(stderr, "format %d vs %d\n", mt->mesaFormat, firstImage->TexFormat);
+ fprintf(stderr, "numLevels %d vs %d\n", mt->numLevels, numLevels);
+ fprintf(stderr, "width0 %d vs %d\n", mtBaseLevel->width, firstImage->Width);
+ fprintf(stderr, "height0 %d vs %d\n", mtBaseLevel->height, firstImage->Height);
+ fprintf(stderr, "depth0 %d vs %d\n", mtBaseLevel->depth, firstImage->Depth);
+ if (mt->target == texObj->Target &&
+ mt->mesaFormat == firstImage->TexFormat &&
+ mt->numLevels >= numLevels &&
+ mtBaseLevel->width == firstImage->Width &&
+ mtBaseLevel->height == firstImage->Height &&
+ mtBaseLevel->depth == firstImage->Depth) {
+ fprintf(stderr, "MATCHED\n");
+ } else {
+ fprintf(stderr, "NOT MATCHED\n");
+ }
+ }
+
+ return (mt->target == texObj->Target &&
+ mt->mesaFormat == firstImage->TexFormat &&
+ mt->numLevels >= numLevels &&
+ mtBaseLevel->width == firstImage->Width &&
+ mtBaseLevel->height == firstImage->Height &&
+ mtBaseLevel->depth == firstImage->Depth);
+}
+
+/**
+ * Try to allocate a mipmap tree for the given texture object.
+ * @param[in] rmesa radeon context
+ * @param[in] t radeon texture object
+ */
+void radeon_try_alloc_miptree(radeonContextPtr rmesa, radeonTexObj *t)
+{
+ struct gl_texture_object *texObj = &t->base;
+ struct gl_texture_image *texImg = texObj->Image[0][texObj->BaseLevel];
+ GLuint numLevels;
+ assert(!t->mt);
+
+ if (!texImg) {
+ radeon_warning("%s(%p) No image in given texture object(%p).\n",
+ __func__, rmesa, t);
+ return;
+ }
+
+
+ numLevels = MIN2(texObj->MaxLevel - texObj->BaseLevel + 1, texImg->MaxNumLevels);
+
+ t->mt = radeon_miptree_create(rmesa, t->base.Target,
+ texImg->TexFormat, texObj->BaseLevel,
+ numLevels, texImg->Width, texImg->Height,
+ texImg->Depth, t->tile_bits);
+}
+
+GLuint
+radeon_miptree_image_offset(radeon_mipmap_tree *mt,
+ GLuint face, GLuint level)
+{
+ if (mt->target == GL_TEXTURE_CUBE_MAP_ARB)
+ return (mt->levels[level].faces[face].offset);
+ else
+ return mt->levels[level].faces[0].offset;
+}
+
+/**
+ * Ensure that the given image is stored in the given miptree from now on.
+ */
+static void migrate_image_to_miptree(radeon_mipmap_tree *mt,
+ radeon_texture_image *image,
+ int face, int level)
+{
+ radeon_mipmap_level *dstlvl = &mt->levels[level];
+ unsigned char *dest;
+
+ assert(image->mt != mt);
+ assert(dstlvl->valid);
+ assert(dstlvl->width == image->base.Base.Width);
+ assert(dstlvl->height == image->base.Base.Height);
+ assert(dstlvl->depth == image->base.Base.Depth);
+
+ radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
+ "%s miptree %p, image %p, face %d, level %d.\n",
+ __func__, mt, image, face, level);
+
+ radeon_bo_map(mt->bo, GL_TRUE);
+ dest = mt->bo->ptr + dstlvl->faces[face].offset;
+
+ if (image->mt) {
+ /* Format etc. should match, so we really just need a memcpy().
+ * In fact, that memcpy() could be done by the hardware in many
+ * cases, provided that we have a proper memory manager.
+ */
+ assert(mt->mesaFormat == image->base.Base.TexFormat);
+
+ radeon_mipmap_level *srclvl = &image->mt->levels[image->base.Base.Level];
+
+ assert(image->base.Base.Level == level);
+ assert(srclvl->size == dstlvl->size);
+ assert(srclvl->rowstride == dstlvl->rowstride);
+
+ radeon_bo_map(image->mt->bo, GL_FALSE);
+
+ memcpy(dest,
+ image->mt->bo->ptr + srclvl->faces[face].offset,
+ dstlvl->size);
+ radeon_bo_unmap(image->mt->bo);
+
+ radeon_miptree_unreference(&image->mt);
+ } else if (image->base.Map) {
+ /* This condition should be removed; it's here to work around
+ * a segfault when mapping textures during software fallbacks.
+ */
+ radeon_print(RADEON_FALLBACKS, RADEON_IMPORTANT,
+ "%s Trying to map texture in software fallback.\n",
+ __func__);
+ const uint32_t srcrowstride = _mesa_format_row_stride(image->base.Base.TexFormat, image->base.Base.Width);
+ uint32_t rows = image->base.Base.Height * image->base.Base.Depth;
+
+ if (_mesa_is_format_compressed(image->base.Base.TexFormat)) {
+ uint32_t blockWidth, blockHeight;
+ _mesa_get_format_block_size(image->base.Base.TexFormat, &blockWidth, &blockHeight);
+ rows = (rows + blockHeight - 1) / blockHeight;
+ }
+
+ copy_rows(dest, dstlvl->rowstride, image->base.Map, srcrowstride,
+ rows, srcrowstride);
+
+ _mesa_align_free(image->base.Map);
+ image->base.Map = NULL;
+ }
+
+ radeon_bo_unmap(mt->bo);
+
+ radeon_miptree_reference(mt, &image->mt);
+}
+
+/**
+ * Filter the matching miptrees and select the one holding the most data.
+ * @param[in] texObj radeon texture object
+ * @param[in] firstLevel first texture level to check
+ * @param[in] lastLevel last texture level to check
+ */
+static radeon_mipmap_tree * get_biggest_matching_miptree(radeonTexObj *texObj,
+ unsigned firstLevel,
+ unsigned lastLevel)
+{
+ const unsigned numLevels = lastLevel - firstLevel + 1;
+ unsigned *mtSizes = calloc(numLevels, sizeof(unsigned));
+ radeon_mipmap_tree **mts = calloc(numLevels, sizeof(radeon_mipmap_tree *));
+ unsigned mtCount = 0;
+ unsigned maxMtIndex = 0;
+ radeon_mipmap_tree *tmp;
+ unsigned int level;
+ int i;
+
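+ /* First pass: for each distinct miptree referenced by images in the
+ * level range, accumulate how many bytes of usable data it holds. */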
+ for (level = firstLevel; level <= lastLevel; ++level) {
+ radeon_texture_image *img = get_radeon_texture_image(texObj->base.Image[0][level]);
+ unsigned found = 0;
+ /* TODO: why this hack?? (a level without an image ends the scan) */
+ if (!img)
+ break;
+
+ if (!img->mt)
+ continue;
+
+ for (i = 0; i < mtCount; ++i) {
+ if (mts[i] == img->mt) {
+ found = 1;
+ mtSizes[i] += img->mt->levels[img->base.Base.Level].size;
+ break;
+ }
+ }
+
+ if (!found && radeon_miptree_matches_texture(img->mt, &texObj->base)) {
+ mtSizes[mtCount] = img->mt->levels[img->base.Base.Level].size;
+ mts[mtCount] = img->mt;
+ mtCount++;
+ }
+ }
+
+ if (mtCount == 0) {
+ free(mtSizes);
+ free(mts);
+ return NULL;
+ }
+
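+ /* Second pass: pick the candidate that holds the most data. */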
+ for (i = 1; i < mtCount; ++i) {
+ if (mtSizes[i] > mtSizes[maxMtIndex]) {
+ maxMtIndex = i;
+ }
+ }
+
+ tmp = mts[maxMtIndex];
+ free(mtSizes);
+ free(mts);
+
+ return tmp;
+}
+
+/**
+ * Validate texture mipmap tree.
+ * If individual images are stored in different mipmap trees,
+ * use the mipmap tree that holds the most of the correct data.
+ */
+int radeon_validate_texture_miptree(struct gl_context * ctx,
+ struct gl_sampler_object *samp,
+ struct gl_texture_object *texObj)
+{
+ radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
+ radeonTexObj *t = radeon_tex_obj(texObj);
+ radeon_mipmap_tree *dst_miptree;
+
+ if (samp == &texObj->Sampler && (t->validated || t->image_override)) {
+ return GL_TRUE;
+ }
+
+ calculate_min_max_lod(samp, &t->base, &t->minLod, &t->maxLod);
+
+ radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
+ "%s: Validating texture %p now, minLod = %d, maxLod = %d\n",
+ __FUNCTION__, texObj, t->minLod, t->maxLod);
+
+ dst_miptree = get_biggest_matching_miptree(t, t->base.BaseLevel, t->base._MaxLevel);
+
+ radeon_miptree_unreference(&t->mt);
+ if (!dst_miptree) {
+ radeon_try_alloc_miptree(rmesa, t);
+ radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
+ "%s: No matching miptree found, allocated new one %p\n",
+ __FUNCTION__, t->mt);
+
+ } else {
+ radeon_miptree_reference(dst_miptree, &t->mt);
+ radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
+ "%s: Using miptree %p\n", __FUNCTION__, t->mt);
+ }
+
+ const unsigned faces = _mesa_num_tex_faces(texObj->Target);
+ unsigned face, level;
+ radeon_texture_image *img;
+ /* Validate only the levels that will actually be used during rendering */
+ for (face = 0; face < faces; ++face) {
+ for (level = t->minLod; level <= t->maxLod; ++level) {
+ img = get_radeon_texture_image(texObj->Image[face][level]);
+
+ radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+ "Checking image level %d, face %d, mt %p ... ",
+ level, face, img->mt);
+
+ if (img->mt != t->mt && !img->used_as_render_target) {
+ radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+ "MIGRATING\n");
+
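+ /* If pending commands still reference the source BO, flush them
+ * before copying out of it. */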
+ struct radeon_bo *src_bo = (img->mt) ? img->mt->bo : img->bo;
+ if (src_bo && radeon_bo_is_referenced_by_cs(src_bo, rmesa->cmdbuf.cs)) {
+ radeon_firevertices(rmesa);
+ }
+ migrate_image_to_miptree(t->mt, img, face, level);
+ } else
+ radeon_print(RADEON_TEXTURE, RADEON_TRACE, "OK\n");
+ }
+ }
+
+ t->validated = GL_TRUE;
+
+ return GL_TRUE;
+}
+
+uint32_t get_base_teximage_offset(radeonTexObj *texObj)
+{
+ if (!texObj->mt) {
+ return 0;
+ } else {
+ return radeon_miptree_image_offset(texObj->mt, 0, texObj->minLod);
+ }
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_mipmap_tree.h b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_mipmap_tree.h
index 254d50cf8c5..74007ffdebc 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_mipmap_tree.h
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_mipmap_tree.h
@@ -1 +1,106 @@
-../radeon/radeon_mipmap_tree.h \ No newline at end of file
+/*
+ * Copyright (C) 2008 Nicolai Haehnle.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RADEON_MIPMAP_TREE_H_
+#define __RADEON_MIPMAP_TREE_H_
+
+#include "radeon_common.h"
+
+typedef struct _radeon_mipmap_tree radeon_mipmap_tree;
+typedef struct _radeon_mipmap_level radeon_mipmap_level;
+typedef struct _radeon_mipmap_image radeon_mipmap_image;
+
+struct _radeon_mipmap_image {
+ GLuint offset; /**< Offset of this image from the start of the mipmap tree buffer, in bytes */
+};
+
+struct _radeon_mipmap_level {
+ GLuint width;
+ GLuint height;
+ GLuint depth;
+ GLuint size; /**< Size of each image, in bytes */
+ GLuint rowstride; /**< in bytes */
+ GLuint valid;
+ radeon_mipmap_image faces[6];
+};
+
+/* Maximum number of mip levels a miptree can store. */
+#define RADEON_MIPTREE_MAX_TEXTURE_LEVELS 15
+
+/**
+ * A mipmap tree contains texture images in the layout that the hardware
+ * expects.
+ *
+ * The meta-data of mipmap trees is immutable, i.e. you cannot change the
+ * layout on-the-fly; however, the texture contents (i.e. texels) can be
+ * changed.
+ */
+struct _radeon_mipmap_tree {
+ struct radeon_bo *bo;
+ GLuint refcount;
+
+ GLuint totalsize; /**< total size of the miptree, in bytes */
+
+ GLenum target; /**< GL_TEXTURE_xxx */
+ GLenum mesaFormat; /**< MESA_FORMAT_xxx */
+ GLuint faces; /**< # of faces: 6 for cubemaps, 1 otherwise */
+ GLuint baseLevel; /**< gl_texture_object->baseLevel it was created for */
+ GLuint numLevels; /**< Number of mip levels stored in this mipmap tree */
+
+ GLuint width0; /**< Width of baseLevel image */
+ GLuint height0; /**< Height of baseLevel image */
+ GLuint depth0; /**< Depth of baseLevel image */
+
+ GLuint tilebits; /**< RADEON_TXO_xxx_TILE */
+
+ radeon_mipmap_level levels[RADEON_MIPTREE_MAX_TEXTURE_LEVELS];
+};
+
+void radeon_miptree_reference(radeon_mipmap_tree *mt, radeon_mipmap_tree **ptr);
+void radeon_miptree_unreference(radeon_mipmap_tree **ptr);
+
+GLboolean radeon_miptree_matches_image(radeon_mipmap_tree *mt,
+ struct gl_texture_image *texImage);
+
+void radeon_try_alloc_miptree(radeonContextPtr rmesa, radeonTexObj *t);
+GLuint radeon_miptree_image_offset(radeon_mipmap_tree *mt,
+ GLuint face, GLuint level);
+uint32_t get_base_teximage_offset(radeonTexObj *texObj);
+
+unsigned get_texture_image_row_stride(radeonContextPtr rmesa, gl_format format, unsigned width, unsigned tiling, unsigned target);
+
+unsigned get_texture_image_size(
+ gl_format format,
+ unsigned rowStride,
+ unsigned height,
+ unsigned depth,
+ unsigned tiling);
+
+radeon_mipmap_tree *radeon_miptree_create(radeonContextPtr rmesa,
+ GLenum target, gl_format mesaFormat, GLuint baseLevel, GLuint numLevels,
+ GLuint width0, GLuint height0, GLuint depth0, GLuint tilebits);
+#endif /* __RADEON_MIPMAP_TREE_H_ */
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_pixel_read.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_pixel_read.c
index 3b03803126f..db5e01da49b 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_pixel_read.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_pixel_read.c
@@ -1 +1,221 @@
-../radeon/radeon_pixel_read.c \ No newline at end of file
+/*
+ * Copyright (C) 2010 Maciej Cencora <m.cencora@gmail.com>
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "stdint.h"
+#include "main/bufferobj.h"
+#include "main/enums.h"
+#include "main/fbobject.h"
+#include "main/image.h"
+#include "main/readpix.h"
+#include "main/state.h"
+
+#include "radeon_buffer_objects.h"
+#include "radeon_common_context.h"
+#include "radeon_debug.h"
+#include "radeon_mipmap_tree.h"
+
+static gl_format gl_format_and_type_to_mesa_format(GLenum format, GLenum type)
+{
+ switch (format)
+ {
+ case GL_RGB:
+ switch (type) {
+ case GL_UNSIGNED_SHORT_5_6_5:
+ return MESA_FORMAT_RGB565;
+ case GL_UNSIGNED_SHORT_5_6_5_REV:
+ return MESA_FORMAT_RGB565_REV;
+ }
+ break;
+ case GL_RGBA:
+ switch (type) {
+ case GL_FLOAT:
+ return MESA_FORMAT_RGBA_FLOAT32;
+ case GL_UNSIGNED_SHORT_5_5_5_1:
+ return MESA_FORMAT_RGBA5551;
+ case GL_UNSIGNED_INT_8_8_8_8:
+ return MESA_FORMAT_RGBA8888;
+ case GL_UNSIGNED_BYTE:
+ case GL_UNSIGNED_INT_8_8_8_8_REV:
+ return MESA_FORMAT_RGBA8888_REV;
+ }
+ break;
+ case GL_BGRA:
+ switch (type) {
+ case GL_UNSIGNED_SHORT_4_4_4_4:
+ return MESA_FORMAT_ARGB4444_REV;
+ case GL_UNSIGNED_SHORT_4_4_4_4_REV:
+ return MESA_FORMAT_ARGB4444;
+ case GL_UNSIGNED_SHORT_5_5_5_1:
+ return MESA_FORMAT_ARGB1555_REV;
+ case GL_UNSIGNED_SHORT_1_5_5_5_REV:
+ return MESA_FORMAT_ARGB1555;
+ case GL_UNSIGNED_INT_8_8_8_8:
+ return MESA_FORMAT_ARGB8888_REV;
+ case GL_UNSIGNED_BYTE:
+ case GL_UNSIGNED_INT_8_8_8_8_REV:
+ return MESA_FORMAT_ARGB8888;
+ }
+ break;
+ }
+
+ return MESA_FORMAT_NONE;
+}
+
+static GLboolean
+do_blit_readpixels(struct gl_context * ctx,
+ GLint x, GLint y, GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
+{
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+ const struct radeon_renderbuffer *rrb = radeon_renderbuffer(ctx->ReadBuffer->_ColorReadBuffer);
+ const gl_format dst_format = gl_format_and_type_to_mesa_format(format, type);
+ unsigned dst_rowstride, dst_imagesize, aligned_rowstride, flip_y;
+ struct radeon_bo *dst_buffer;
+ GLint dst_x = 0, dst_y = 0;
+ intptr_t dst_offset;
+
+ /* It's not worth it if the number of pixels to copy is really small. */
+ if (width * height < 100) {
+ return GL_FALSE;
+ }
+
+ if (dst_format == MESA_FORMAT_NONE ||
+ !radeon->vtbl.check_blit(dst_format, rrb->pitch / rrb->cpp) || !radeon->vtbl.blit) {
+ return GL_FALSE;
+ }
+
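+ /* Pixel transfer operations and logic ops cannot be expressed as a
+ * plain blit; take the software path instead. */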
+ if (ctx->_ImageTransferState || ctx->Color.ColorLogicOpEnabled) {
+ return GL_FALSE;
+ }
+
+ if (pack->SwapBytes || pack->LsbFirst) {
+ return GL_FALSE;
+ }
+
+ if (pack->RowLength > 0) {
+ dst_rowstride = pack->RowLength;
+ } else {
+ dst_rowstride = width;
+ }
+
+ if (!_mesa_clip_copytexsubimage(ctx, &dst_x, &dst_y, &x, &y, &width, &height)) {
+ return GL_TRUE;
+ }
+ assert(x >= 0 && y >= 0);
+
+ aligned_rowstride = get_texture_image_row_stride(radeon, dst_format, dst_rowstride, 0, GL_TEXTURE_2D);
+ dst_rowstride *= _mesa_get_format_bytes(dst_format);
+ if (_mesa_is_bufferobj(pack->BufferObj) && aligned_rowstride != dst_rowstride)
+ return GL_FALSE;
+ dst_imagesize = get_texture_image_size(dst_format,
+ aligned_rowstride,
+ height, 1, 0);
+
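+ /* Blit either into a temporary GTT buffer (for plain user memory) or
+ * directly into the bound pixel buffer object. */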
+ if (!_mesa_is_bufferobj(pack->BufferObj))
+ {
+ dst_buffer = radeon_bo_open(radeon->radeonScreen->bom, 0, dst_imagesize, 1024, RADEON_GEM_DOMAIN_GTT, 0);
+ dst_offset = 0;
+ }
+ else
+ {
+ dst_buffer = get_radeon_buffer_object(pack->BufferObj)->bo;
+ dst_offset = (intptr_t)pixels;
+ }
+
+ /* Disable source Y flipping for FBOs */
+ flip_y = _mesa_is_winsys_fbo(ctx->ReadBuffer);
+ if (pack->Invert) {
+ y = rrb->base.Base.Height - height - y;
+ flip_y = !flip_y;
+ }
+
+ if (radeon->vtbl.blit(ctx,
+ rrb->bo,
+ rrb->draw_offset,
+ rrb->base.Base.Format,
+ rrb->pitch / rrb->cpp,
+ rrb->base.Base.Width,
+ rrb->base.Base.Height,
+ x,
+ y,
+ dst_buffer,
+ dst_offset,
+ dst_format,
+ aligned_rowstride / _mesa_get_format_bytes(dst_format),
+ width,
+ height,
+ 0, /* dst_x */
+ 0, /* dst_y */
+ width,
+ height,
+ flip_y))
+ {
+ if (!_mesa_is_bufferobj(pack->BufferObj))
+ {
+ radeon_bo_map(dst_buffer, 0);
+ copy_rows(pixels, dst_rowstride, dst_buffer->ptr,
+ aligned_rowstride, height, dst_rowstride);
+ radeon_bo_unmap(dst_buffer);
+ radeon_bo_unref(dst_buffer);
+ }
+
+ return GL_TRUE;
+ }
+
+ if (!_mesa_is_bufferobj(pack->BufferObj))
+ radeon_bo_unref(dst_buffer);
+
+ return GL_FALSE;
+}
+
+void
+radeonReadPixels(struct gl_context * ctx,
+ GLint x, GLint y, GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
+{
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+ radeon_prepare_render(radeon);
+
+ if (do_blit_readpixels(ctx, x, y, width, height, format, type, pack, pixels))
+ return;
+
+ radeon_print(RADEON_FALLBACKS, RADEON_NORMAL,
+ "Falling back to sw for ReadPixels (format %s, type %s)\n",
+ _mesa_lookup_enum_by_nr(format), _mesa_lookup_enum_by_nr(type));
+
+ /* Update Mesa state before calling _mesa_readpixels().
+ * XXX this may not be needed since ReadPixels no longer uses the
+ * span code.
+ */
+ if (ctx->NewState)
+ _mesa_update_state(ctx);
+
+ _mesa_readpixels(ctx, x, y, width, height, format, type, pack, pixels);
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_queryobj.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_queryobj.c
index 1d6ebc1c48b..5c02f5c2ed0 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_queryobj.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_queryobj.c
@@ -1 +1,217 @@
-../radeon/radeon_queryobj.c \ No newline at end of file
+/*
+ * Copyright © 2008-2009 Maciej Cencora <m.cencora@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Maciej Cencora <m.cencora@gmail.com>
+ *
+ */
+#include "radeon_common.h"
+#include "radeon_queryobj.h"
+#include "radeon_debug.h"
+
+#include "main/imports.h"
+#include "main/simple_list.h"
+
+#include <inttypes.h>
+
+static void radeonQueryGetResult(struct gl_context *ctx, struct gl_query_object *q)
+{
+ struct radeon_query_object *query = (struct radeon_query_object *)q;
+ uint32_t *result;
+ int i;
+
+ radeon_print(RADEON_STATE, RADEON_VERBOSE,
+ "%s: query id %d, result %d\n",
+ __FUNCTION__, query->Base.Id, (int) query->Base.Result);
+
+ radeon_bo_map(query->bo, GL_FALSE);
+ result = query->bo->ptr;
+
+ query->Base.Result = 0;
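+ /* Each emitted query sequence appends one little-endian 32-bit partial
+ * result to the BO; the final result is the sum of all of them. */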
+ for (i = 0; i < query->curr_offset/sizeof(uint32_t); ++i) {
+ query->Base.Result += LE32_TO_CPU(result[i]);
+ radeon_print(RADEON_STATE, RADEON_TRACE, "result[%d] = %d\n", i, LE32_TO_CPU(result[i]));
+ }
+
+ radeon_bo_unmap(query->bo);
+}
+
+static struct gl_query_object * radeonNewQueryObject(struct gl_context *ctx, GLuint id)
+{
+ struct radeon_query_object *query;
+
+ query = calloc(1, sizeof(struct radeon_query_object));
+
+ query->Base.Id = id;
+ query->Base.Result = 0;
+ query->Base.Active = GL_FALSE;
+ query->Base.Ready = GL_TRUE;
+
+ radeon_print(RADEON_STATE, RADEON_VERBOSE,"%s: query id %d\n", __FUNCTION__, query->Base.Id);
+
+ return &query->Base;
+}
+
+static void radeonDeleteQuery(struct gl_context *ctx, struct gl_query_object *q)
+{
+ struct radeon_query_object *query = (struct radeon_query_object *)q;
+
+ radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d\n", __FUNCTION__, q->Id);
+
+ if (query->bo) {
+ radeon_bo_unref(query->bo);
+ }
+
+ free(query);
+}
+
+static void radeonWaitQuery(struct gl_context *ctx, struct gl_query_object *q)
+{
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+ struct radeon_query_object *query = (struct radeon_query_object *)q;
+
+ /* If the cmdbuf with packets for this query hasn't been flushed yet, do it now */
+ if (radeon_bo_is_referenced_by_cs(query->bo, radeon->cmdbuf.cs))
+ ctx->Driver.Flush(ctx);
+
+ radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s: query id %d, bo %p, offset %d\n", __FUNCTION__, q->Id, query->bo, query->curr_offset);
+
+ radeonQueryGetResult(ctx, q);
+
+ query->Base.Ready = GL_TRUE;
+}
+
+
+static void radeonBeginQuery(struct gl_context *ctx, struct gl_query_object *q)
+{
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+ struct radeon_query_object *query = (struct radeon_query_object *)q;
+
+ radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d\n", __FUNCTION__, q->Id);
+
+ assert(radeon->query.current == NULL);
+
+ if (radeon->dma.flush)
+ radeon->dma.flush(radeon->glCtx);
+
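+ /* Lazily allocate one page for the query results on first use. */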
+ if (!query->bo) {
+ query->bo = radeon_bo_open(radeon->radeonScreen->bom, 0, RADEON_QUERY_PAGE_SIZE, RADEON_QUERY_PAGE_SIZE, RADEON_GEM_DOMAIN_GTT, 0);
+ }
+ query->curr_offset = 0;
+
+ radeon->query.current = query;
+
+ radeon->query.queryobj.dirty = GL_TRUE;
+ radeon->hw.is_dirty = GL_TRUE;
+}
+
+void radeonEmitQueryEnd(struct gl_context *ctx)
+{
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+ struct radeon_query_object *query = radeon->query.current;
+
+ if (!query)
+ return;
+
+ if (query->emitted_begin == GL_FALSE)
+ return;
+
+ radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d, bo %p, offset %d\n", __FUNCTION__, query->Base.Id, query->bo, query->curr_offset);
+
+ radeon_cs_space_check_with_bo(radeon->cmdbuf.cs,
+ query->bo,
+ 0, RADEON_GEM_DOMAIN_GTT);
+
+ radeon->vtbl.emit_query_finish(radeon);
+}
+
+static void radeonEndQuery(struct gl_context *ctx, struct gl_query_object *q)
+{
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+
+ radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d\n", __FUNCTION__, q->Id);
+
+ if (radeon->dma.flush)
+ radeon->dma.flush(radeon->glCtx);
+ radeonEmitQueryEnd(ctx);
+
+ radeon->query.current = NULL;
+}
+
+static void radeonCheckQuery(struct gl_context *ctx, struct gl_query_object *q)
+{
+ radeon_print(RADEON_STATE, RADEON_TRACE, "%s: query id %d\n", __FUNCTION__, q->Id);
+
+#ifdef DRM_RADEON_GEM_BUSY
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+
+ struct radeon_query_object *query = (struct radeon_query_object *)q;
+ uint32_t domain;
+
+ /* Need to perform a flush, as per ARB_occlusion_query spec */
+ if (radeon_bo_is_referenced_by_cs(query->bo, radeon->cmdbuf.cs)) {
+ ctx->Driver.Flush(ctx);
+ }
+
+ if (radeon_bo_is_busy(query->bo, &domain) == 0) {
+ radeonQueryGetResult(ctx, q);
+ query->Base.Ready = GL_TRUE;
+ }
+#else
+ radeonWaitQuery(ctx, q);
+#endif
+}
+
+void radeonInitQueryObjFunctions(struct dd_function_table *functions)
+{
+ functions->NewQueryObject = radeonNewQueryObject;
+ functions->DeleteQuery = radeonDeleteQuery;
+ functions->BeginQuery = radeonBeginQuery;
+ functions->EndQuery = radeonEndQuery;
+ functions->CheckQuery = radeonCheckQuery;
+ functions->WaitQuery = radeonWaitQuery;
+}
+
+int radeon_check_query_active(struct gl_context *ctx, struct radeon_state_atom *atom)
+{
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+ struct radeon_query_object *query = radeon->query.current;
+
+ if (!query || query->emitted_begin)
+ return 0;
+ return atom->cmd_size;
+}
+
+void radeon_emit_queryobj(struct gl_context *ctx, struct radeon_state_atom *atom)
+{
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+ BATCH_LOCALS(radeon);
+ int dwords;
+
+ dwords = (*atom->check) (ctx, atom);
+
+ BEGIN_BATCH_NO_AUTOSTATE(dwords);
+ OUT_BATCH_TABLE(atom->cmd, dwords);
+ END_BATCH();
+
+ radeon->query.current->emitted_begin = GL_TRUE;
+}
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_queryobj.h b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_queryobj.h
index 8f6f842b0a6..e5063934824 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_queryobj.h
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_queryobj.h
@@ -1 +1,55 @@
-../radeon/radeon_queryobj.h \ No newline at end of file
+/*
+ * Copyright © 2008 Maciej Cencora <m.cencora@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Maciej Cencora <m.cencora@gmail.com>
+ *
+ */
+
+#include "main/imports.h"
+#include "main/simple_list.h"
+#include "radeon_common_context.h"
+
+extern void radeonEmitQueryBegin(struct gl_context *ctx);
+extern void radeonEmitQueryEnd(struct gl_context *ctx);
+
+extern void radeonInitQueryObjFunctions(struct dd_function_table *functions);
+
+#define RADEON_QUERY_PAGE_SIZE 4096
+
+int radeon_check_query_active(struct gl_context *ctx, struct radeon_state_atom *atom);
+void radeon_emit_queryobj(struct gl_context *ctx, struct radeon_state_atom *atom);
+
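+/**
+ * Register the query state atom; its check/emit callbacks above emit the
+ * query-begin packets whenever a query becomes active.
+ */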
+static inline void radeon_init_query_stateobj(radeonContextPtr radeon, int SZ)
+{
+ radeon->query.queryobj.cmd_size = (SZ);
+ radeon->query.queryobj.cmd = (uint32_t*)CALLOC((SZ) * sizeof(uint32_t));
+ radeon->query.queryobj.name = "queryobj";
+ radeon->query.queryobj.idx = 0;
+ radeon->query.queryobj.check = radeon_check_query_active;
+ radeon->query.queryobj.dirty = GL_FALSE;
+ radeon->query.queryobj.emit = radeon_emit_queryobj;
+
+ radeon->hw.max_state_size += (SZ);
+ insert_at_tail(&radeon->hw.atomlist, &radeon->query.queryobj);
+}
+
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_screen.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_screen.c
index 86161118dd3..27b57c5a3ea 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_screen.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_screen.c
@@ -1 +1,796 @@
-../radeon/radeon_screen.c \ No newline at end of file
+/**************************************************************************
+
+Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
+ VA Linux Systems Inc., Fremont, California.
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+/**
+ * \file radeon_screen.c
+ * Screen initialization functions for the Radeon driver.
+ *
+ * \author Kevin E. Martin <martin@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+#include <errno.h>
+#include "main/glheader.h"
+#include "main/imports.h"
+#include "main/mtypes.h"
+#include "main/framebuffer.h"
+#include "main/renderbuffer.h"
+#include "main/fbobject.h"
+#include "swrast/s_renderbuffer.h"
+
+#define STANDALONE_MMIO
+#include "radeon_chipset.h"
+#include "radeon_macros.h"
+#include "radeon_screen.h"
+#include "radeon_common.h"
+#include "radeon_common_context.h"
+#if defined(RADEON_R100)
+#include "radeon_context.h"
+#include "radeon_tex.h"
+#elif defined(RADEON_R200)
+#include "r200_context.h"
+#include "r200_tex.h"
+#endif
+
+#include "utils.h"
+
+#include "GL/internal/dri_interface.h"
+
+/* Radeon configuration
+ */
+#include "xmlpool.h"
+
+#define DRI_CONF_COMMAND_BUFFER_SIZE(def,min,max) \
+DRI_CONF_OPT_BEGIN_V(command_buffer_size,int,def, # min ":" # max ) \
+ DRI_CONF_DESC(en,"Size of command buffer (in KB)") \
+ DRI_CONF_DESC(de,"Grösse des Befehlspuffers (in KB)") \
+DRI_CONF_OPT_END
+
+#if defined(RADEON_R100) /* R100 */
+PUBLIC const char __driConfigOptions[] =
+DRI_CONF_BEGIN
+ DRI_CONF_SECTION_PERFORMANCE
+ DRI_CONF_TCL_MODE(DRI_CONF_TCL_CODEGEN)
+ DRI_CONF_FTHROTTLE_MODE(DRI_CONF_FTHROTTLE_IRQS)
+ DRI_CONF_VBLANK_MODE(DRI_CONF_VBLANK_DEF_INTERVAL_0)
+ DRI_CONF_MAX_TEXTURE_UNITS(3,2,3)
+ DRI_CONF_HYPERZ(false)
+ DRI_CONF_COMMAND_BUFFER_SIZE(8, 8, 32)
+ DRI_CONF_SECTION_END
+ DRI_CONF_SECTION_QUALITY
+ DRI_CONF_TEXTURE_DEPTH(DRI_CONF_TEXTURE_DEPTH_FB)
+ DRI_CONF_DEF_MAX_ANISOTROPY(1.0,"1.0,2.0,4.0,8.0,16.0")
+ DRI_CONF_NO_NEG_LOD_BIAS(false)
+ DRI_CONF_FORCE_S3TC_ENABLE(false)
+ DRI_CONF_COLOR_REDUCTION(DRI_CONF_COLOR_REDUCTION_DITHER)
+ DRI_CONF_ROUND_MODE(DRI_CONF_ROUND_TRUNC)
+ DRI_CONF_DITHER_MODE(DRI_CONF_DITHER_XERRORDIFF)
+ DRI_CONF_ALLOW_LARGE_TEXTURES(2)
+ DRI_CONF_SECTION_END
+ DRI_CONF_SECTION_DEBUG
+ DRI_CONF_NO_RAST(false)
+ DRI_CONF_SECTION_END
+DRI_CONF_END;
+static const GLuint __driNConfigOptions = 15;
+
+#elif defined(RADEON_R200)
+
+PUBLIC const char __driConfigOptions[] =
+DRI_CONF_BEGIN
+ DRI_CONF_SECTION_PERFORMANCE
+ DRI_CONF_TCL_MODE(DRI_CONF_TCL_CODEGEN)
+ DRI_CONF_FTHROTTLE_MODE(DRI_CONF_FTHROTTLE_IRQS)
+ DRI_CONF_VBLANK_MODE(DRI_CONF_VBLANK_DEF_INTERVAL_0)
+ DRI_CONF_MAX_TEXTURE_UNITS(6,2,6)
+ DRI_CONF_HYPERZ(false)
+ DRI_CONF_COMMAND_BUFFER_SIZE(8, 8, 32)
+ DRI_CONF_SECTION_END
+ DRI_CONF_SECTION_QUALITY
+ DRI_CONF_TEXTURE_DEPTH(DRI_CONF_TEXTURE_DEPTH_FB)
+ DRI_CONF_DEF_MAX_ANISOTROPY(1.0,"1.0,2.0,4.0,8.0,16.0")
+ DRI_CONF_NO_NEG_LOD_BIAS(false)
+ DRI_CONF_FORCE_S3TC_ENABLE(false)
+ DRI_CONF_COLOR_REDUCTION(DRI_CONF_COLOR_REDUCTION_DITHER)
+ DRI_CONF_ROUND_MODE(DRI_CONF_ROUND_TRUNC)
+ DRI_CONF_DITHER_MODE(DRI_CONF_DITHER_XERRORDIFF)
+ DRI_CONF_ALLOW_LARGE_TEXTURES(2)
+ DRI_CONF_TEXTURE_BLEND_QUALITY(1.0,"0.0:1.0")
+ DRI_CONF_SECTION_END
+ DRI_CONF_SECTION_DEBUG
+ DRI_CONF_NO_RAST(false)
+ DRI_CONF_SECTION_END
+ DRI_CONF_SECTION_SOFTWARE
+ DRI_CONF_NV_VERTEX_PROGRAM(false)
+ DRI_CONF_SECTION_END
+DRI_CONF_END;
+static const GLuint __driNConfigOptions = 17;
+
+#endif
+
+#ifndef RADEON_INFO_TILE_CONFIG
+#define RADEON_INFO_TILE_CONFIG 0x6
+#endif
+
+static int
+radeonGetParam(__DRIscreen *sPriv, int param, void *value)
+{
+ int ret;
+ drm_radeon_getparam_t gp = { 0 };
+ struct drm_radeon_info info = { 0 };
+
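+ /* Kernels with DRM major version >= 2 expose these values through the
+ * INFO ioctl; older kernels use the legacy GETPARAM ioctl. */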
+ if (sPriv->drm_version.major >= 2) {
+ info.value = (uint64_t)(uintptr_t)value;
+ switch (param) {
+ case RADEON_PARAM_DEVICE_ID:
+ info.request = RADEON_INFO_DEVICE_ID;
+ break;
+ case RADEON_PARAM_NUM_GB_PIPES:
+ info.request = RADEON_INFO_NUM_GB_PIPES;
+ break;
+ case RADEON_PARAM_NUM_Z_PIPES:
+ info.request = RADEON_INFO_NUM_Z_PIPES;
+ break;
+ case RADEON_INFO_TILE_CONFIG:
+ info.request = RADEON_INFO_TILE_CONFIG;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = drmCommandWriteRead(sPriv->fd, DRM_RADEON_INFO, &info, sizeof(info));
+ } else {
+ gp.param = param;
+ gp.value = value;
+
+ ret = drmCommandWriteRead(sPriv->fd, DRM_RADEON_GETPARAM, &gp, sizeof(gp));
+ }
+ return ret;
+}
+
+#if defined(RADEON_R100)
+static const __DRItexBufferExtension radeonTexBufferExtension = {
+ { __DRI_TEX_BUFFER, __DRI_TEX_BUFFER_VERSION },
+ radeonSetTexBuffer,
+ radeonSetTexBuffer2,
+};
+#elif defined(RADEON_R200)
+static const __DRItexBufferExtension r200TexBufferExtension = {
+ { __DRI_TEX_BUFFER, __DRI_TEX_BUFFER_VERSION },
+ r200SetTexBuffer,
+ r200SetTexBuffer2,
+};
+#endif
+
+static void
+radeonDRI2Flush(__DRIdrawable *drawable)
+{
+ radeonContextPtr rmesa;
+
+ rmesa = (radeonContextPtr) drawable->driContextPriv->driverPrivate;
+ radeonFlush(rmesa->glCtx);
+}
+
+static const struct __DRI2flushExtensionRec radeonFlushExtension = {
+ { __DRI2_FLUSH, __DRI2_FLUSH_VERSION },
+ radeonDRI2Flush,
+ dri2InvalidateDrawable,
+};
+
+static __DRIimage *
+radeon_create_image_from_name(__DRIscreen *screen,
+ int width, int height, int format,
+ int name, int pitch, void *loaderPrivate)
+{
+ __DRIimage *image;
+ radeonScreenPtr radeonScreen = screen->driverPrivate;
+
+ if (name == 0)
+ return NULL;
+
+ image = CALLOC(sizeof *image);
+ if (image == NULL)
+ return NULL;
+
+ switch (format) {
+ case __DRI_IMAGE_FORMAT_RGB565:
+ image->format = MESA_FORMAT_RGB565;
+ image->internal_format = GL_RGB;
+ image->data_type = GL_UNSIGNED_BYTE;
+ break;
+ case __DRI_IMAGE_FORMAT_XRGB8888:
+ image->format = MESA_FORMAT_XRGB8888;
+ image->internal_format = GL_RGB;
+ image->data_type = GL_UNSIGNED_BYTE;
+ break;
+ case __DRI_IMAGE_FORMAT_ARGB8888:
+ image->format = MESA_FORMAT_ARGB8888;
+ image->internal_format = GL_RGBA;
+ image->data_type = GL_UNSIGNED_BYTE;
+ break;
+ default:
+ free(image);
+ return NULL;
+ }
+
+ image->data = loaderPrivate;
+ image->cpp = _mesa_get_format_bytes(image->format);
+ image->width = width;
+ image->pitch = pitch;
+ image->height = height;
+
+ image->bo = radeon_bo_open(radeonScreen->bom,
+ (uint32_t)name,
+ image->pitch * image->height * image->cpp,
+ 0,
+ RADEON_GEM_DOMAIN_VRAM,
+ 0);
+
+ if (image->bo == NULL) {
+ FREE(image);
+ return NULL;
+ }
+
+ return image;
+}
+
+static __DRIimage *
+radeon_create_image_from_renderbuffer(__DRIcontext *context,
+ int renderbuffer, void *loaderPrivate)
+{
+ __DRIimage *image;
+ radeonContextPtr radeon = context->driverPrivate;
+ struct gl_renderbuffer *rb;
+ struct radeon_renderbuffer *rrb;
+
+ rb = _mesa_lookup_renderbuffer(radeon->glCtx, renderbuffer);
+ if (!rb) {
+ _mesa_error(radeon->glCtx,
+ GL_INVALID_OPERATION, "glRenderbufferExternalMESA");
+ return NULL;
+ }
+
+ rrb = radeon_renderbuffer(rb);
+ image = CALLOC(sizeof *image);
+ if (image == NULL)
+ return NULL;
+
+ image->internal_format = rb->InternalFormat;
+ image->format = rb->Format;
+ image->cpp = rrb->cpp;
+ image->data_type = GL_UNSIGNED_BYTE;
+ image->data = loaderPrivate;
+ radeon_bo_ref(rrb->bo);
+ image->bo = rrb->bo;
+
+ image->width = rb->Width;
+ image->height = rb->Height;
+ image->pitch = rrb->pitch / image->cpp;
+
+ return image;
+}
+
+static void
+radeon_destroy_image(__DRIimage *image)
+{
+ radeon_bo_unref(image->bo);
+ FREE(image);
+}
+
+static __DRIimage *
+radeon_create_image(__DRIscreen *screen,
+ int width, int height, int format,
+ unsigned int use,
+ void *loaderPrivate)
+{
+ __DRIimage *image;
+ radeonScreenPtr radeonScreen = screen->driverPrivate;
+
+ image = CALLOC(sizeof *image);
+ if (image == NULL)
+ return NULL;
+
+ image->dri_format = format;
+
+ switch (format) {
+ case __DRI_IMAGE_FORMAT_RGB565:
+ image->format = MESA_FORMAT_RGB565;
+ image->internal_format = GL_RGB;
+ image->data_type = GL_UNSIGNED_BYTE;
+ break;
+ case __DRI_IMAGE_FORMAT_XRGB8888:
+ image->format = MESA_FORMAT_XRGB8888;
+ image->internal_format = GL_RGB;
+ image->data_type = GL_UNSIGNED_BYTE;
+ break;
+ case __DRI_IMAGE_FORMAT_ARGB8888:
+ image->format = MESA_FORMAT_ARGB8888;
+ image->internal_format = GL_RGBA;
+ image->data_type = GL_UNSIGNED_BYTE;
+ break;
+ default:
+ free(image);
+ return NULL;
+ }
+
+ image->data = loaderPrivate;
+ image->cpp = _mesa_get_format_bytes(image->format);
+ image->width = width;
+ image->height = height;
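+ /* Round each row up to a 256-byte boundary, then store the pitch in
+ * pixels. */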
+ image->pitch = ((image->cpp * image->width + 255) & ~255) / image->cpp;
+
+ image->bo = radeon_bo_open(radeonScreen->bom,
+ 0,
+ image->pitch * image->height * image->cpp,
+ 0,
+ RADEON_GEM_DOMAIN_VRAM,
+ 0);
+
+ if (image->bo == NULL) {
+ FREE(image);
+ return NULL;
+ }
+
+ return image;
+}
+
+static GLboolean
+radeon_query_image(__DRIimage *image, int attrib, int *value)
+{
+ switch (attrib) {
+ case __DRI_IMAGE_ATTRIB_STRIDE:
+ *value = image->pitch * image->cpp;
+ return GL_TRUE;
+ case __DRI_IMAGE_ATTRIB_HANDLE:
+ *value = image->bo->handle;
+ return GL_TRUE;
+ case __DRI_IMAGE_ATTRIB_NAME:
+ radeon_gem_get_kernel_name(image->bo, (uint32_t *) value);
+ return GL_TRUE;
+ default:
+ return GL_FALSE;
+ }
+}
+
+static struct __DRIimageExtensionRec radeonImageExtension = {
+ { __DRI_IMAGE, 1 },
+ radeon_create_image_from_name,
+ radeon_create_image_from_renderbuffer,
+ radeon_destroy_image,
+ radeon_create_image,
+ radeon_query_image
+};
+
+static int radeon_set_screen_flags(radeonScreenPtr screen, int device_id)
+{
+ screen->device_id = device_id;
+ screen->chip_flags = 0;
+ switch ( device_id ) {
+#if defined(RADEON_R100)
+ case PCI_CHIP_RN50_515E:
+ case PCI_CHIP_RN50_5969:
+ return -1;
+
+ case PCI_CHIP_RADEON_LY:
+ case PCI_CHIP_RADEON_LZ:
+ case PCI_CHIP_RADEON_QY:
+ case PCI_CHIP_RADEON_QZ:
+ screen->chip_family = CHIP_FAMILY_RV100;
+ break;
+
+ case PCI_CHIP_RS100_4136:
+ case PCI_CHIP_RS100_4336:
+ screen->chip_family = CHIP_FAMILY_RS100;
+ break;
+
+ case PCI_CHIP_RS200_4137:
+ case PCI_CHIP_RS200_4337:
+ case PCI_CHIP_RS250_4237:
+ case PCI_CHIP_RS250_4437:
+ screen->chip_family = CHIP_FAMILY_RS200;
+ break;
+
+ case PCI_CHIP_RADEON_QD:
+ case PCI_CHIP_RADEON_QE:
+ case PCI_CHIP_RADEON_QF:
+ case PCI_CHIP_RADEON_QG:
+ /* all original radeons (7200) presumably have a stencil op bug */
+ screen->chip_family = CHIP_FAMILY_R100;
+ screen->chip_flags = RADEON_CHIPSET_TCL | RADEON_CHIPSET_BROKEN_STENCIL | RADEON_CHIPSET_DEPTH_ALWAYS_TILED;
+ break;
+
+ case PCI_CHIP_RV200_QW:
+ case PCI_CHIP_RV200_QX:
+ case PCI_CHIP_RADEON_LW:
+ case PCI_CHIP_RADEON_LX:
+ screen->chip_family = CHIP_FAMILY_RV200;
+ screen->chip_flags = RADEON_CHIPSET_TCL | RADEON_CHIPSET_DEPTH_ALWAYS_TILED;
+ break;
+
+#elif defined(RADEON_R200)
+ case PCI_CHIP_R200_BB:
+ case PCI_CHIP_R200_QH:
+ case PCI_CHIP_R200_QL:
+ case PCI_CHIP_R200_QM:
+ screen->chip_family = CHIP_FAMILY_R200;
+ screen->chip_flags = RADEON_CHIPSET_TCL | RADEON_CHIPSET_DEPTH_ALWAYS_TILED;
+ break;
+
+ case PCI_CHIP_RV250_If:
+ case PCI_CHIP_RV250_Ig:
+ case PCI_CHIP_RV250_Ld:
+ case PCI_CHIP_RV250_Lf:
+ case PCI_CHIP_RV250_Lg:
+ screen->chip_family = CHIP_FAMILY_RV250;
+ screen->chip_flags = R200_CHIPSET_YCBCR_BROKEN | RADEON_CHIPSET_TCL | RADEON_CHIPSET_DEPTH_ALWAYS_TILED;
+ break;
+
+ case PCI_CHIP_RV280_4C6E:
+ case PCI_CHIP_RV280_5960:
+ case PCI_CHIP_RV280_5961:
+ case PCI_CHIP_RV280_5962:
+ case PCI_CHIP_RV280_5964:
+ case PCI_CHIP_RV280_5965:
+ case PCI_CHIP_RV280_5C61:
+ case PCI_CHIP_RV280_5C63:
+ screen->chip_family = CHIP_FAMILY_RV280;
+ screen->chip_flags = RADEON_CHIPSET_TCL | RADEON_CHIPSET_DEPTH_ALWAYS_TILED;
+ break;
+
+ case PCI_CHIP_RS300_5834:
+ case PCI_CHIP_RS300_5835:
+ case PCI_CHIP_RS350_7834:
+ case PCI_CHIP_RS350_7835:
+ screen->chip_family = CHIP_FAMILY_RS300;
+ screen->chip_flags = RADEON_CHIPSET_DEPTH_ALWAYS_TILED;
+ break;
+#endif
+
+ default:
+ fprintf(stderr, "unknown chip id 0x%x, can't guess.\n",
+ device_id);
+ return -1;
+ }
+
+ return 0;
+}
+
+static radeonScreenPtr
+radeonCreateScreen2(__DRIscreen *sPriv)
+{
+ radeonScreenPtr screen;
+ int i;
+ int ret;
+ uint32_t device_id = 0;
+
+ /* Allocate the private area */
+ screen = (radeonScreenPtr) CALLOC( sizeof(*screen) );
+ if ( !screen ) {
+ fprintf(stderr, "%s: Could not allocate memory for screen structure", __FUNCTION__);
+ fprintf(stderr, "leaving here\n");
+ return NULL;
+ }
+
+ radeon_init_debug();
+
+ /* parse information in __driConfigOptions */
+ driParseOptionInfo (&screen->optionCache,
+ __driConfigOptions, __driNConfigOptions);
+
+ screen->chip_flags = 0;
+
+ screen->irq = 1;
+
+ ret = radeonGetParam(sPriv, RADEON_PARAM_DEVICE_ID, &device_id);
+ if (ret) {
+ FREE( screen );
+ fprintf(stderr, "drm_radeon_getparam_t (RADEON_PARAM_DEVICE_ID): %d\n", ret);
+ return NULL;
+ }
+
+ ret = radeon_set_screen_flags(screen, device_id);
+ if (ret == -1) {
+ /* unknown chip: free the screen so it isn't leaked */
+ FREE( screen );
+ return NULL;
+ }
+
+ if (getenv("RADEON_NO_TCL"))
+ screen->chip_flags &= ~RADEON_CHIPSET_TCL;
+
+ i = 0;
+ screen->extensions[i++] = &dri2ConfigQueryExtension.base;
+
+#if defined(RADEON_R100)
+ screen->extensions[i++] = &radeonTexBufferExtension.base;
+#elif defined(RADEON_R200)
+ screen->extensions[i++] = &r200TexBufferExtension.base;
+#endif
+
+ screen->extensions[i++] = &radeonFlushExtension.base;
+ screen->extensions[i++] = &radeonImageExtension.base;
+
+ screen->extensions[i++] = NULL;
+ sPriv->extensions = screen->extensions;
+
+ screen->driScreen = sPriv;
+ screen->bom = radeon_bo_manager_gem_ctor(sPriv->fd);
+ if (screen->bom == NULL) {
+ free(screen);
+ return NULL;
+ }
+ return screen;
+}
+
+/* Destroy the device specific screen private data struct.
+ */
+static void
+radeonDestroyScreen( __DRIscreen *sPriv )
+{
+ radeonScreenPtr screen = (radeonScreenPtr)sPriv->driverPrivate;
+
+ if (!screen)
+ return;
+
+#ifdef RADEON_BO_TRACK
+ radeon_tracker_print(&screen->bom->tracker, stderr);
+#endif
+ radeon_bo_manager_gem_dtor(screen->bom);
+
+ /* free all option information */
+ driDestroyOptionInfo (&screen->optionCache);
+
+ FREE( screen );
+ sPriv->driverPrivate = NULL;
+}
+
+
+/* Initialize the driver specific screen private data.
+ */
+static GLboolean
+radeonInitDriver( __DRIscreen *sPriv )
+{
+ sPriv->driverPrivate = (void *) radeonCreateScreen2( sPriv );
+ if ( !sPriv->driverPrivate ) {
+ radeonDestroyScreen( sPriv );
+ return GL_FALSE;
+ }
+
+ return GL_TRUE;
+}
+
+
+
+/**
+ * Create the Mesa framebuffer and renderbuffers for a given window/drawable.
+ *
+ * \todo This function (and its interface) will need to be updated to support
+ * pbuffers.
+ */
+static GLboolean
+radeonCreateBuffer( __DRIscreen *driScrnPriv,
+ __DRIdrawable *driDrawPriv,
+ const struct gl_config *mesaVis,
+ GLboolean isPixmap )
+{
+ radeonScreenPtr screen = (radeonScreenPtr) driScrnPriv->driverPrivate;
+
+ const GLboolean swDepth = GL_FALSE;
+ const GLboolean swAlpha = GL_FALSE;
+ const GLboolean swAccum = mesaVis->accumRedBits > 0;
+ const GLboolean swStencil = mesaVis->stencilBits > 0 &&
+ mesaVis->depthBits != 24;
+ gl_format rgbFormat;
+ struct radeon_framebuffer *rfb;
+
+ if (isPixmap)
+ return GL_FALSE; /* not implemented */
+
+ rfb = CALLOC_STRUCT(radeon_framebuffer);
+ if (!rfb)
+ return GL_FALSE;
+
+ _mesa_initialize_window_framebuffer(&rfb->base, mesaVis);
+
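+ /* Pick a color format matching the visual; the _REV variants cover
+ * big-endian hosts. */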
+ if (mesaVis->redBits == 5)
+ rgbFormat = _mesa_little_endian() ? MESA_FORMAT_RGB565 : MESA_FORMAT_RGB565_REV;
+ else if (mesaVis->alphaBits == 0)
+ rgbFormat = _mesa_little_endian() ? MESA_FORMAT_XRGB8888 : MESA_FORMAT_XRGB8888_REV;
+ else
+ rgbFormat = _mesa_little_endian() ? MESA_FORMAT_ARGB8888 : MESA_FORMAT_ARGB8888_REV;
+
+ /* front color renderbuffer */
+ rfb->color_rb[0] = radeon_create_renderbuffer(rgbFormat, driDrawPriv);
+ _mesa_add_renderbuffer(&rfb->base, BUFFER_FRONT_LEFT, &rfb->color_rb[0]->base.Base);
+ rfb->color_rb[0]->has_surface = 1;
+
+ /* back color renderbuffer */
+ if (mesaVis->doubleBufferMode) {
+ rfb->color_rb[1] = radeon_create_renderbuffer(rgbFormat, driDrawPriv);
+ _mesa_add_renderbuffer(&rfb->base, BUFFER_BACK_LEFT, &rfb->color_rb[1]->base.Base);
+ rfb->color_rb[1]->has_surface = 1;
+ }
+
+ if (mesaVis->depthBits == 24) {
+ if (mesaVis->stencilBits == 8) {
+ struct radeon_renderbuffer *depthStencilRb =
+ radeon_create_renderbuffer(MESA_FORMAT_S8_Z24, driDrawPriv);
+ _mesa_add_renderbuffer(&rfb->base, BUFFER_DEPTH, &depthStencilRb->base.Base);
+ _mesa_add_renderbuffer(&rfb->base, BUFFER_STENCIL, &depthStencilRb->base.Base);
+ depthStencilRb->has_surface = screen->depthHasSurface;
+ } else {
+ /* depth renderbuffer */
+ struct radeon_renderbuffer *depth =
+ radeon_create_renderbuffer(MESA_FORMAT_X8_Z24, driDrawPriv);
+ _mesa_add_renderbuffer(&rfb->base, BUFFER_DEPTH, &depth->base.Base);
+ depth->has_surface = screen->depthHasSurface;
+ }
+ } else if (mesaVis->depthBits == 16) {
+ /* just 16-bit depth buffer, no hw stencil */
+ struct radeon_renderbuffer *depth =
+ radeon_create_renderbuffer(MESA_FORMAT_Z16, driDrawPriv);
+ _mesa_add_renderbuffer(&rfb->base, BUFFER_DEPTH, &depth->base.Base);
+ depth->has_surface = screen->depthHasSurface;
+ }
+
+ _swrast_add_soft_renderbuffers(&rfb->base,
+ GL_FALSE, /* color */
+ swDepth,
+ swStencil,
+ swAccum,
+ swAlpha,
+ GL_FALSE /* aux */);
+ driDrawPriv->driverPrivate = (void *) rfb;
+
+ return (driDrawPriv->driverPrivate != NULL);
+}
+
+
+static void radeon_cleanup_renderbuffers(struct radeon_framebuffer *rfb)
+{
+ struct radeon_renderbuffer *rb;
+
+ rb = rfb->color_rb[0];
+ if (rb && rb->bo) {
+ radeon_bo_unref(rb->bo);
+ rb->bo = NULL;
+ }
+ rb = rfb->color_rb[1];
+ if (rb && rb->bo) {
+ radeon_bo_unref(rb->bo);
+ rb->bo = NULL;
+ }
+ rb = radeon_get_renderbuffer(&rfb->base, BUFFER_DEPTH);
+ if (rb && rb->bo) {
+ radeon_bo_unref(rb->bo);
+ rb->bo = NULL;
+ }
+}
+
+void
+radeonDestroyBuffer(__DRIdrawable *driDrawPriv)
+{
+ struct radeon_framebuffer *rfb;
+ if (!driDrawPriv)
+ return;
+
+ rfb = (void*)driDrawPriv->driverPrivate;
+ if (!rfb)
+ return;
+ radeon_cleanup_renderbuffers(rfb);
+ _mesa_reference_framebuffer((struct gl_framebuffer **)(&(driDrawPriv->driverPrivate)), NULL);
+}
+
+#define ARRAY_SIZE(a) (sizeof (a) / sizeof ((a)[0]))
+
+/**
+ * This is the driver specific part of the createNewScreen entry point.
+ * Called when using DRI2.
+ *
+ * \return the struct gl_config supported by this driver
+ */
+static const
+__DRIconfig **radeonInitScreen2(__DRIscreen *psp)
+{
+ GLenum fb_format[3];
+ GLenum fb_type[3];
+ /* GLX_SWAP_COPY_OML is only supported because the Intel driver doesn't
+ * support pageflipping at all.
+ */
+ static const GLenum back_buffer_modes[] = {
+ GLX_NONE, GLX_SWAP_UNDEFINED_OML, /*, GLX_SWAP_COPY_OML*/
+ };
+ uint8_t depth_bits[4], stencil_bits[4], msaa_samples_array[1];
+ int color;
+ __DRIconfig **configs = NULL;
+
+ if (!radeonInitDriver(psp)) {
+ return NULL;
+ }
+ depth_bits[0] = 0;
+ stencil_bits[0] = 0;
+ depth_bits[1] = 16;
+ stencil_bits[1] = 0;
+ depth_bits[2] = 24;
+ stencil_bits[2] = 0;
+ depth_bits[3] = 24;
+ stencil_bits[3] = 8;
+
+ msaa_samples_array[0] = 0;
+
+ fb_format[0] = GL_RGB;
+ fb_type[0] = GL_UNSIGNED_SHORT_5_6_5;
+
+ fb_format[1] = GL_BGR;
+ fb_type[1] = GL_UNSIGNED_INT_8_8_8_8_REV;
+
+ fb_format[2] = GL_BGRA;
+ fb_type[2] = GL_UNSIGNED_INT_8_8_8_8_REV;
+
+ for (color = 0; color < ARRAY_SIZE(fb_format); color++) {
+ __DRIconfig **new_configs;
+
+ new_configs = driCreateConfigs(fb_format[color], fb_type[color],
+ depth_bits,
+ stencil_bits,
+ ARRAY_SIZE(depth_bits),
+ back_buffer_modes,
+ ARRAY_SIZE(back_buffer_modes),
+ msaa_samples_array,
+ ARRAY_SIZE(msaa_samples_array),
+ GL_TRUE);
+ configs = driConcatConfigs(configs, new_configs);
+ }
+
+ if (configs == NULL) {
+ fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
+ __LINE__);
+ return NULL;
+ }
+
+ return (const __DRIconfig **)configs;
+}
+
+const struct __DriverAPIRec driDriverAPI = {
+ .InitScreen = radeonInitScreen2,
+ .DestroyScreen = radeonDestroyScreen,
+#if defined(RADEON_R200)
+ .CreateContext = r200CreateContext,
+ .DestroyContext = r200DestroyContext,
+#else
+ .CreateContext = r100CreateContext,
+ .DestroyContext = radeonDestroyContext,
+#endif
+ .CreateBuffer = radeonCreateBuffer,
+ .DestroyBuffer = radeonDestroyBuffer,
+ .MakeCurrent = radeonMakeCurrent,
+ .UnbindContext = radeonUnbindContext,
+};
+
+/* This is the table of extensions that the loader will dlsym() for. */
+PUBLIC const __DRIextension *__driDriverExtensions[] = {
+ &driCoreExtension.base,
+ &driDRI2Extension.base,
+ NULL
+};
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_screen.h b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_screen.h
index 23bb6bd4598..dd618f5836f 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_screen.h
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_screen.h
@@ -1 +1,122 @@
-../radeon/radeon_screen.h \ No newline at end of file
+/**************************************************************************
+
+Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
+ VA Linux Systems Inc., Fremont, California.
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+/*
+ * Authors:
+ * Kevin E. Martin <martin@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#ifndef __RADEON_SCREEN_H__
+#define __RADEON_SCREEN_H__
+
+/*
+ * IMPORTS: these headers contain all the DRI, X and kernel-related
+ * definitions that we need.
+ */
+#include "dri_util.h"
+#include "radeon_dri.h"
+#include "radeon_chipset.h"
+#include "radeon_reg.h"
+#include "drm_sarea.h"
+#include "xmlconfig.h"
+
+
+typedef struct {
+ drm_handle_t handle; /* Handle to the DRM region */
+ drmSize size; /* Size of the DRM region */
+ drmAddress map; /* Mapping of the DRM region */
+} radeonRegionRec, *radeonRegionPtr;
+
+typedef struct radeon_screen {
+ int chip_family;
+ int chip_flags;
+ int cpp;
+ int card_type;
+ int device_id; /* PCI ID */
+ int AGPMode;
+ unsigned int irq; /* IRQ number (0 means none) */
+
+ unsigned int fbLocation;
+ unsigned int frontOffset;
+ unsigned int frontPitch;
+ unsigned int backOffset;
+ unsigned int backPitch;
+
+ unsigned int depthOffset;
+ unsigned int depthPitch;
+
+ /* Shared texture data */
+ int numTexHeaps;
+ int texOffset[RADEON_NR_TEX_HEAPS];
+ int texSize[RADEON_NR_TEX_HEAPS];
+ int logTexGranularity[RADEON_NR_TEX_HEAPS];
+
+ radeonRegionRec mmio;
+ radeonRegionRec status;
+ radeonRegionRec gartTextures;
+
+ drmBufMapPtr buffers;
+
+ __volatile__ uint32_t *scratch;
+
+ __DRIscreen *driScreen;
+ unsigned int sarea_priv_offset;
+ unsigned int gart_buffer_offset; /* offset in card memory space */
+ unsigned int gart_texture_offset; /* offset in card memory space */
+ unsigned int gart_base;
+
+ GLboolean depthHasSurface;
+
+ /* Configuration cache with default values for all contexts */
+ driOptionCache optionCache;
+
+ const __DRIextension *extensions[17];
+
+ int num_gb_pipes;
+ int num_z_pipes;
+ drm_radeon_sarea_t *sarea; /* Private SAREA data */
+ struct radeon_bo_manager *bom;
+
+} radeonScreenRec, *radeonScreenPtr;
+
+struct __DRIimageRec {
+ struct radeon_bo *bo;
+ GLenum internal_format;
+ uint32_t dri_format;
+ GLuint format;
+ GLenum data_type;
+ int width, height; /* in pixels */
+ int pitch; /* in pixels */
+ int cpp;
+ void *data;
+};
+
+extern void radeonDestroyBuffer(__DRIdrawable *driDrawPriv);
+#endif /* __RADEON_SCREEN_H__ */
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_span.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_span.c
index 232868c4c9e..0617adc5bc5 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_span.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_span.c
@@ -1 +1,161 @@
-../radeon/radeon_span.c \ No newline at end of file
+/**************************************************************************
+
+Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
+Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
+ VA Linux Systems Inc., Fremont, California.
+
+The Weather Channel (TM) funded Tungsten Graphics to develop the
+initial release of the Radeon 8500 driver under the XFree86 license.
+This notice must be preserved.
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+/*
+ * Authors:
+ * Kevin E. Martin <martin@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ * Keith Whitwell <keith@tungstengraphics.com>
+ *
+ */
+
+#include "main/glheader.h"
+#include "main/texformat.h"
+#include "main/renderbuffer.h"
+#include "main/samplerobj.h"
+#include "swrast/swrast.h"
+#include "swrast/s_renderbuffer.h"
+
+#include "radeon_common.h"
+#include "radeon_span.h"
+
+
+static void
+radeon_renderbuffer_map(struct gl_context *ctx, struct gl_renderbuffer *rb)
+{
+ struct radeon_renderbuffer *rrb = radeon_renderbuffer(rb);
+ GLubyte *map;
+ int stride;
+
+ if (!rb || !rrb)
+ return;
+
+ ctx->Driver.MapRenderbuffer(ctx, rb, 0, 0, rb->Width, rb->Height,
+ GL_MAP_READ_BIT | GL_MAP_WRITE_BIT,
+ &map, &stride);
+
+ rrb->base.Map = map;
+ rrb->base.RowStride = stride;
+ /* No floating point color buffers, use GLubytes */
+ rrb->base.ColorType = GL_UNSIGNED_BYTE;
+}
+
+static void
+radeon_renderbuffer_unmap(struct gl_context *ctx, struct gl_renderbuffer *rb)
+{
+ struct radeon_renderbuffer *rrb = radeon_renderbuffer(rb);
+ if (!rb || !rrb)
+ return;
+
+ ctx->Driver.UnmapRenderbuffer(ctx, rb);
+
+ rrb->base.Map = NULL;
+ rrb->base.RowStride = 0;
+}
+
+static void
+radeon_map_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
+{
+ GLuint i;
+
+ radeon_print(RADEON_MEMORY, RADEON_TRACE,
+ "%s( %p , fb %p )\n",
+ __func__, ctx, fb);
+
+ /* check for render to textures */
+ for (i = 0; i < BUFFER_COUNT; i++)
+ radeon_renderbuffer_map(ctx, fb->Attachment[i].Renderbuffer);
+
+ radeon_check_front_buffer_rendering(ctx);
+}
+
+static void
+radeon_unmap_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
+{
+ GLuint i;
+
+ radeon_print(RADEON_MEMORY, RADEON_TRACE,
+ "%s( %p , fb %p)\n",
+ __func__, ctx, fb);
+
+ /* check for render to textures */
+ for (i = 0; i < BUFFER_COUNT; i++)
+ radeon_renderbuffer_unmap(ctx, fb->Attachment[i].Renderbuffer);
+
+ radeon_check_front_buffer_rendering(ctx);
+}
+
+static void radeonSpanRenderStart(struct gl_context * ctx)
+{
+ radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
+ int i;
+
+ radeon_firevertices(rmesa);
+
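+ /* Make sure every texture image lives in a consistent miptree, then map
+ * everything the software rasterizer will touch. */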
+ for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
+ if (ctx->Texture.Unit[i]._ReallyEnabled) {
+ radeon_validate_texture_miptree(ctx, _mesa_get_samplerobj(ctx, i),
+ ctx->Texture.Unit[i]._Current);
+ radeon_swrast_map_texture_images(ctx, ctx->Texture.Unit[i]._Current);
+ }
+ }
+
+ radeon_map_framebuffer(ctx, ctx->DrawBuffer);
+ if (ctx->ReadBuffer != ctx->DrawBuffer)
+ radeon_map_framebuffer(ctx, ctx->ReadBuffer);
+}
+
+static void radeonSpanRenderFinish(struct gl_context * ctx)
+{
+ int i;
+
+ _swrast_flush(ctx);
+
+ for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++)
+ if (ctx->Texture.Unit[i]._ReallyEnabled)
+ radeon_swrast_unmap_texture_images(ctx, ctx->Texture.Unit[i]._Current);
+
+ radeon_unmap_framebuffer(ctx, ctx->DrawBuffer);
+ if (ctx->ReadBuffer != ctx->DrawBuffer)
+ radeon_unmap_framebuffer(ctx, ctx->ReadBuffer);
+}
+
+void radeonInitSpanFuncs(struct gl_context * ctx)
+{
+ struct swrast_device_driver *swdd =
+ _swrast_GetDeviceDriverReference(ctx);
+ swdd->SpanRenderStart = radeonSpanRenderStart;
+ swdd->SpanRenderFinish = radeonSpanRenderFinish;
+}
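+
+/*
+ * Hedged sketch of how swrast drives the two hooks above (the swrast
+ * side is not part of this file): every software fallback brackets its
+ * span work roughly as
+ *
+ *   swdd->SpanRenderStart(ctx);   map framebuffers + enabled textures
+ *   ...software rasterization reads/writes the mapped memory...
+ *   swdd->SpanRenderFinish(ctx);  flush swrast, then unmap everything
+ */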
+
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_span.h b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_span.h
index f9d634508c2..64517b59237 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_span.h
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_span.h
@@ -1 +1,47 @@
-../radeon/radeon_span.h \ No newline at end of file
+/**************************************************************************
+
+Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
+ VA Linux Systems Inc., Fremont, California.
+Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
+
+The Weather Channel (TM) funded Tungsten Graphics to develop the
+initial release of the Radeon 8500 driver under the XFree86 license.
+This notice must be preserved.
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+/*
+ * Authors:
+ * Gareth Hughes <gareth@valinux.com>
+ * Keith Whitwell <keith@tungstengraphics.com>
+ * Kevin E. Martin <martin@valinux.com>
+ */
+
+#ifndef __RADEON_SPAN_H__
+#define __RADEON_SPAN_H__
+
+extern void radeonInitSpanFuncs(struct gl_context * ctx);
+
+#endif
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_tex_copy.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_tex_copy.c
index dfa5ba34e65..a6c406b2bff 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_tex_copy.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_tex_copy.c
@@ -1 +1,160 @@
-../radeon/radeon_tex_copy.c \ No newline at end of file
+/*
+ * Copyright (C) 2009 Maciej Cencora <m.cencora@gmail.com>
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "radeon_common.h"
+#include "radeon_texture.h"
+
+#include "main/enums.h"
+#include "main/image.h"
+#include "main/teximage.h"
+#include "main/texstate.h"
+#include "drivers/common/meta.h"
+
+#include "radeon_mipmap_tree.h"
+
+static GLboolean
+do_copy_texsubimage(struct gl_context *ctx,
+ struct radeon_tex_obj *tobj,
+ radeon_texture_image *timg,
+ GLint dstx, GLint dsty,
+ struct radeon_renderbuffer *rrb,
+ GLint x, GLint y,
+ GLsizei width, GLsizei height)
+{
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+ const GLuint face = timg->base.Base.Face;
+ const GLuint level = timg->base.Base.Level;
+ unsigned src_bpp;
+ unsigned dst_bpp;
+ gl_format src_mesaformat;
+ gl_format dst_mesaformat;
+ unsigned flip_y;
+
+ if (!radeon->vtbl.blit) {
+ return GL_FALSE;
+ }
+
+ /* This is a software renderbuffer; fall back to swrast */
+ if (!rrb) {
+ return GL_FALSE;
+ }
+
+ if (_mesa_get_format_bits(timg->base.Base.TexFormat, GL_DEPTH_BITS) > 0) {
+ /* copying depth values */
+ flip_y = ctx->ReadBuffer->Attachment[BUFFER_DEPTH].Type == GL_NONE;
+ } else {
+ /* copying color */
+ flip_y = ctx->ReadBuffer->Attachment[BUFFER_COLOR0].Type == GL_NONE;
+ }
+
+ if (!timg->mt) {
+ radeon_validate_texture_miptree(ctx, &tobj->base.Sampler, &tobj->base);
+ }
+
+ assert(rrb->bo);
+ assert(timg->mt);
+ assert(timg->mt->bo);
+ assert(timg->base.Base.Width >= dstx + width);
+ assert(timg->base.Base.Height >= dsty + height);
+
+ intptr_t src_offset = rrb->draw_offset;
+ intptr_t dst_offset = radeon_miptree_image_offset(timg->mt, face, level);
+
+ if (0) {
+ fprintf(stderr, "%s: copying to face %d, level %d\n",
+ __func__, face, level);
+ fprintf(stderr, "to: x %d, y %d, offset %d\n", dstx, dsty, (uint32_t) dst_offset);
+ fprintf(stderr, "from (%dx%d) width %d, height %d, offset %d, pitch %d\n",
+ x, y, rrb->base.Base.Width, rrb->base.Base.Height, (uint32_t) src_offset, rrb->pitch/rrb->cpp);
+ fprintf(stderr, "src size %d, dst size %d\n", rrb->bo->size, timg->mt->bo->size);
+ }
+
+ src_mesaformat = rrb->base.Base.Format;
+ dst_mesaformat = timg->base.Base.TexFormat;
+ src_bpp = _mesa_get_format_bytes(src_mesaformat);
+ dst_bpp = _mesa_get_format_bytes(dst_mesaformat);
+ if (!radeon->vtbl.check_blit(dst_mesaformat, rrb->pitch / rrb->cpp)) {
+ /* depth formats tend to be special */
+ if (_mesa_get_format_bits(dst_mesaformat, GL_DEPTH_BITS) > 0)
+ return GL_FALSE;
+
+ if (src_bpp != dst_bpp)
+ return GL_FALSE;
+
+ switch (dst_bpp) {
+ case 2:
+ src_mesaformat = MESA_FORMAT_RGB565;
+ dst_mesaformat = MESA_FORMAT_RGB565;
+ break;
+ case 4:
+ src_mesaformat = MESA_FORMAT_ARGB8888;
+ dst_mesaformat = MESA_FORMAT_ARGB8888;
+ break;
+ case 1:
+ src_mesaformat = MESA_FORMAT_A8;
+ dst_mesaformat = MESA_FORMAT_A8;
+ break;
+ default:
+ return GL_FALSE;
+ }
+ }
+
+ /* blit from src buffer to texture */
+ return radeon->vtbl.blit(ctx, rrb->bo, src_offset, src_mesaformat, rrb->pitch/rrb->cpp,
+ rrb->base.Base.Width, rrb->base.Base.Height, x, y,
+ timg->mt->bo, dst_offset, dst_mesaformat,
+ timg->mt->levels[level].rowstride / dst_bpp,
+ timg->base.Base.Width, timg->base.Base.Height,
+ dstx, dsty, width, height, flip_y);
+}
+
+void
+radeonCopyTexSubImage(struct gl_context *ctx, GLuint dims,
+ struct gl_texture_image *texImage,
+ GLint xoffset, GLint yoffset, GLint zoffset,
+ struct gl_renderbuffer *rb,
+ GLint x, GLint y,
+ GLsizei width, GLsizei height)
+{
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+ radeon_prepare_render(radeon);
+
+ if (dims != 2 || !do_copy_texsubimage(ctx,
+ radeon_tex_obj(texImage->TexObject),
+ (radeon_texture_image *)texImage,
+ xoffset, yoffset,
+ radeon_renderbuffer(rb), x, y, width, height)) {
+
+ radeon_print(RADEON_FALLBACKS, RADEON_NORMAL,
+ "Falling back to sw for glCopyTexSubImage2D\n");
+
+ _mesa_meta_CopyTexSubImage(ctx, dims, texImage,
+ xoffset, yoffset, zoffset,
+ rb, x, y, width, height);
+ }
+}
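+
+/*
+ * Note on the fallback chain above: any GL_FALSE from do_copy_texsubimage
+ * (missing blit vtbl entry, software renderbuffer, or a destination
+ * format that check_blit rejects and that cannot be aliased to
+ * RGB565/ARGB8888/A8) lands in _mesa_meta_CopyTexSubImage rather than
+ * raising an error, as does any non-2D copy.
+ */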
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_texture.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_texture.c
index a822710915f..e405f9746f1 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_texture.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_texture.c
@@ -1 +1,833 @@
-../radeon/radeon_texture.c \ No newline at end of file
+/*
+ * Copyright (C) 2009 Maciej Cencora.
+ * Copyright (C) 2008 Nicolai Haehnle.
+ * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "main/glheader.h"
+#include "main/imports.h"
+#include "main/context.h"
+#include "main/enums.h"
+#include "main/mfeatures.h"
+#include "main/mipmap.h"
+#include "main/pbo.h"
+#include "main/texcompress.h"
+#include "main/texstore.h"
+#include "main/teximage.h"
+#include "main/texobj.h"
+#include "drivers/common/meta.h"
+
+#include "xmlpool.h" /* for symbolic values of enum-type options */
+
+#include "radeon_common.h"
+
+#include "radeon_mipmap_tree.h"
+
+static void teximage_assign_miptree(radeonContextPtr rmesa,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage);
+
+static radeon_mipmap_tree *radeon_miptree_create_for_teximage(radeonContextPtr rmesa,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage);
+
+void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
+ GLuint numrows, GLuint rowsize)
+{
+ assert(rowsize <= dststride);
+ assert(rowsize <= srcstride);
+
+ radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+ "%s dst %p, stride %u, src %p, stride %u, "
+ "numrows %u, rowsize %u.\n",
+ __func__, dst, dststride,
+ src, srcstride,
+ numrows, rowsize);
+
+ if (rowsize == srcstride && rowsize == dststride) {
+ memcpy(dst, src, numrows*rowsize);
+ } else {
+ GLuint i;
+ for(i = 0; i < numrows; ++i) {
+ memcpy(dst, src, rowsize);
+ dst += dststride;
+ src += srcstride;
+ }
+ }
+}
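+
+/*
+ * Usage sketch with hypothetical sizes: copying four 256-byte rows from
+ * a tightly packed source into a destination with a 512-byte pitch takes
+ * the per-row path above,
+ *
+ *   copy_rows(dst, 512, src, 256, 4, 256);
+ *
+ * while equal strides and rowsize collapse into the single memcpy.
+ */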
+
+/* textures */
+/**
+ * Allocate an empty texture image object.
+ */
+struct gl_texture_image *radeonNewTextureImage(struct gl_context *ctx)
+{
+ return CALLOC(sizeof(radeon_texture_image));
+}
+
+
+/**
+ * Delete a texture image object.
+ */
+static void
+radeonDeleteTextureImage(struct gl_context *ctx, struct gl_texture_image *img)
+{
+ /* nothing special (yet) for radeon_texture_image */
+ _mesa_delete_texture_image(ctx, img);
+}
+
+static GLboolean
+radeonAllocTextureImageBuffer(struct gl_context *ctx,
+ struct gl_texture_image *timage)
+{
+ radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
+ radeon_texture_image *image = get_radeon_texture_image(timage);
+ struct gl_texture_object *texobj = timage->TexObject;
+ int slices;
+
+ ctx->Driver.FreeTextureImageBuffer(ctx, timage);
+
+ switch (texobj->Target) {
+ case GL_TEXTURE_3D:
+ slices = timage->Depth;
+ break;
+ default:
+ slices = 1;
+ }
+ assert(!image->base.ImageOffsets);
+ image->base.ImageOffsets = malloc(slices * sizeof(GLuint));
+ teximage_assign_miptree(rmesa, texobj, timage);
+
+ return GL_TRUE;
+}
+
+
+/**
+ * Free memory associated with this texture image.
+ */
+void radeonFreeTextureImageBuffer(struct gl_context *ctx, struct gl_texture_image *timage)
+{
+ radeon_texture_image* image = get_radeon_texture_image(timage);
+
+ if (image->mt) {
+ radeon_miptree_unreference(&image->mt);
+ } else {
+ _swrast_free_texture_image_buffer(ctx, timage);
+ }
+ if (image->bo) {
+ radeon_bo_unref(image->bo);
+ image->bo = NULL;
+ }
+ if (image->base.Buffer) {
+ _mesa_align_free(image->base.Buffer);
+ image->base.Buffer = NULL;
+ }
+
+ if (image->base.ImageOffsets) {
+ free(image->base.ImageOffsets);
+ image->base.ImageOffsets = NULL;
+ }
+}
+
+/* Set Data pointer and additional data for mapped texture image */
+static void teximage_set_map_data(radeon_texture_image *image)
+{
+ radeon_mipmap_level *lvl;
+
+ if (!image->mt) {
+ radeon_warning("%s(%p) Trying to set map data without miptree.\n",
+ __func__, image);
+
+ return;
+ }
+
+ lvl = &image->mt->levels[image->base.Base.Level];
+
+ image->base.Map = image->mt->bo->ptr + lvl->faces[image->base.Base.Face].offset;
+ image->base.RowStride = lvl->rowstride / _mesa_get_format_bytes(image->base.Base.TexFormat);
+}
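+
+/*
+ * Note that RowStride above is in texels, not bytes: a hypothetical
+ * 1024-byte miptree rowstride with a 4-byte texel format yields
+ * RowStride = 256, the same convention radeon_swrast_map_image() uses
+ * for rs further below.
+ */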
+
+
+/**
+ * Map a single texture image for glTexImage and friends.
+ */
+void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable)
+{
+ radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
+ "%s(img %p), write_enable %s.\n",
+ __func__, image,
+ write_enable ? "true": "false");
+ if (image->mt) {
+ assert(!image->base.Map);
+
+ radeon_bo_map(image->mt->bo, write_enable);
+ teximage_set_map_data(image);
+ }
+}
+
+
+void radeon_teximage_unmap(radeon_texture_image *image)
+{
+ radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
+ "%s(img %p)\n",
+ __func__, image);
+ if (image->mt) {
+ assert(image->base.Map);
+
+ image->base.Map = 0;
+ radeon_bo_unmap(image->mt->bo);
+ }
+}
+
+/**
+ * Map texture memory/buffer into user space.
+ * Note: the region of interest parameters are ignored here.
+ * \param mapOut returns start of mapping of region of interest
+ * \param rowStrideOut returns row stride in bytes
+ */
+static void
+radeon_map_texture_image(struct gl_context *ctx,
+ struct gl_texture_image *texImage,
+ GLuint slice,
+ GLuint x, GLuint y, GLuint w, GLuint h,
+ GLbitfield mode,
+ GLubyte **map,
+ GLint *stride)
+{
+ radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
+ radeon_texture_image *image = get_radeon_texture_image(texImage);
+ radeon_mipmap_tree *mt = image->mt;
+ GLuint texel_size = _mesa_get_format_bytes(texImage->TexFormat);
+ GLuint width = texImage->Width;
+ GLuint height = texImage->Height;
+ struct radeon_bo *bo = !image->mt ? image->bo : image->mt->bo;
+ unsigned int bw, bh;
+ GLboolean write = (mode & GL_MAP_WRITE_BIT) != 0;
+
+ _mesa_get_format_block_size(texImage->TexFormat, &bw, &bh);
+ assert(y % bh == 0);
+ y /= bh;
+ texel_size /= bw;
+
+ if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
+ radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
+ "%s for texture that is "
+ "queued for GPU processing.\n",
+ __func__);
+ radeon_firevertices(rmesa);
+ }
+
+ if (image->bo) {
+ /* TFP case */
+ radeon_bo_map(image->bo, write);
+ *stride = get_texture_image_row_stride(rmesa, texImage->TexFormat, width, 0, texImage->TexObject->Target);
+ /* return the pointer of the bo that was actually mapped */
+ *map = image->bo->ptr;
+ } else if (likely(mt)) {
+ void *base;
+ radeon_mipmap_level *lvl = &image->mt->levels[texImage->Level];
+
+ radeon_bo_map(mt->bo, write);
+ base = mt->bo->ptr + lvl->faces[image->base.Base.Face].offset;
+
+ *stride = lvl->rowstride;
+ *map = base + (slice * height) * *stride;
+ } else {
+ /* texture data is in malloc'd memory */
+
+ assert(map);
+
+ *stride = _mesa_format_row_stride(texImage->TexFormat, width);
+ *map = image->base.Buffer + (slice * height) * *stride;
+ }
+
+ *map += y * *stride + x * texel_size;
+}
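+
+/*
+ * Worked example with hypothetical numbers for the miptree branch above
+ * (uncompressed format, so bw = bh = 1): a 4-byte-per-texel level with
+ * rowstride 1024 mapped at x = 8, y = 2, slice 0 returns
+ *
+ *   *map = base + 2 * 1024 + 8 * 4 = base + 2080
+ *
+ * so the pointer already addresses the region of interest even though
+ * w/h are otherwise unused here.
+ */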
+
+static void
+radeon_unmap_texture_image(struct gl_context *ctx,
+ struct gl_texture_image *texImage, GLuint slice)
+{
+ radeon_texture_image *image = get_radeon_texture_image(texImage);
+
+ if (image->bo)
+ radeon_bo_unmap(image->bo);
+ else if (image->mt)
+ radeon_bo_unmap(image->mt->bo);
+}
+
+/* try to find a format which will only need a memcopy */
+static gl_format radeonChoose8888TexFormat(radeonContextPtr rmesa,
+ GLenum srcFormat,
+ GLenum srcType, GLboolean fbo)
+{
+#if defined(RADEON_R100)
+ /* r100 can only do this */
+ return _radeon_texformat_argb8888;
+#elif defined(RADEON_R200)
+ const GLuint ui = 1;
+ const GLubyte littleEndian = *((const GLubyte *)&ui);
+
+ if (fbo)
+ return _radeon_texformat_argb8888;
+
+ if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
+ (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
+ (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
+ (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && littleEndian)) {
+ return MESA_FORMAT_RGBA8888;
+ } else if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
+ (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && littleEndian) ||
+ (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
+ (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && !littleEndian)) {
+ return MESA_FORMAT_RGBA8888_REV;
+ } else
+ return _radeon_texformat_argb8888;
+#endif
+}
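+
+/*
+ * The littleEndian probe in the r200 path above reads the first byte of
+ * a GLuint holding 1: it is 1 on little-endian hosts and 0 on big-endian
+ * ones. That is why, for example, GL_RGBA + GL_UNSIGNED_BYTE maps to the
+ * straight MESA_FORMAT_RGBA8888 only on big-endian machines and to the
+ * _REV variant on little-endian ones.
+ */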
+
+gl_format radeonChooseTextureFormat_mesa(struct gl_context * ctx,
+ GLenum target,
+ GLint internalFormat,
+ GLenum format,
+ GLenum type)
+{
+ return radeonChooseTextureFormat(ctx, internalFormat, format,
+ type, 0);
+}
+
+gl_format radeonChooseTextureFormat(struct gl_context * ctx,
+ GLint internalFormat,
+ GLenum format,
+ GLenum type, GLboolean fbo)
+{
+ radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
+ const GLboolean do32bpt =
+ (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_32);
+ const GLboolean force16bpt =
+ (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FORCE_16);
+ (void)format;
+
+ radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+ "%s InternalFormat=%s(%d) type=%s format=%s\n",
+ __func__,
+ _mesa_lookup_enum_by_nr(internalFormat), internalFormat,
+ _mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
+ radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+ "%s do32bpt=%d force16bpt=%d\n",
+ __func__, do32bpt, force16bpt);
+
+ switch (internalFormat) {
+ case 4:
+ case GL_RGBA:
+ case GL_COMPRESSED_RGBA:
+ switch (type) {
+ case GL_UNSIGNED_INT_10_10_10_2:
+ case GL_UNSIGNED_INT_2_10_10_10_REV:
+ return do32bpt ? _radeon_texformat_argb8888 :
+ _radeon_texformat_argb1555;
+ case GL_UNSIGNED_SHORT_4_4_4_4:
+ case GL_UNSIGNED_SHORT_4_4_4_4_REV:
+ return _radeon_texformat_argb4444;
+ case GL_UNSIGNED_SHORT_5_5_5_1:
+ case GL_UNSIGNED_SHORT_1_5_5_5_REV:
+ return _radeon_texformat_argb1555;
+ default:
+ return do32bpt ? radeonChoose8888TexFormat(rmesa, format, type, fbo) :
+ _radeon_texformat_argb4444;
+ }
+
+ case 3:
+ case GL_RGB:
+ case GL_COMPRESSED_RGB:
+ switch (type) {
+ case GL_UNSIGNED_SHORT_4_4_4_4:
+ case GL_UNSIGNED_SHORT_4_4_4_4_REV:
+ return _radeon_texformat_argb4444;
+ case GL_UNSIGNED_SHORT_5_5_5_1:
+ case GL_UNSIGNED_SHORT_1_5_5_5_REV:
+ return _radeon_texformat_argb1555;
+ case GL_UNSIGNED_SHORT_5_6_5:
+ case GL_UNSIGNED_SHORT_5_6_5_REV:
+ return _radeon_texformat_rgb565;
+ default:
+ return do32bpt ? _radeon_texformat_argb8888 :
+ _radeon_texformat_rgb565;
+ }
+
+ case GL_RGBA8:
+ case GL_RGB10_A2:
+ case GL_RGBA12:
+ case GL_RGBA16:
+ return !force16bpt ?
+ radeonChoose8888TexFormat(rmesa, format, type, fbo) :
+ _radeon_texformat_argb4444;
+
+ case GL_RGBA4:
+ case GL_RGBA2:
+ return _radeon_texformat_argb4444;
+
+ case GL_RGB5_A1:
+ return _radeon_texformat_argb1555;
+
+ case GL_RGB8:
+ case GL_RGB10:
+ case GL_RGB12:
+ case GL_RGB16:
+ return !force16bpt ? _radeon_texformat_argb8888 :
+ _radeon_texformat_rgb565;
+
+ case GL_RGB5:
+ case GL_RGB4:
+ case GL_R3_G3_B2:
+ return _radeon_texformat_rgb565;
+
+ case GL_ALPHA:
+ case GL_ALPHA4:
+ case GL_ALPHA8:
+ case GL_ALPHA12:
+ case GL_ALPHA16:
+ case GL_COMPRESSED_ALPHA:
+#if defined(RADEON_R200)
+ /* r200: can't use a8 format since interpreting hw I8 as a8 would result
+ in wrong rgb values (same as alpha value instead of 0). */
+ return _radeon_texformat_al88;
+#else
+ return MESA_FORMAT_A8;
+#endif
+ case 1:
+ case GL_LUMINANCE:
+ case GL_LUMINANCE4:
+ case GL_LUMINANCE8:
+ case GL_LUMINANCE12:
+ case GL_LUMINANCE16:
+ case GL_COMPRESSED_LUMINANCE:
+ return MESA_FORMAT_L8;
+
+ case 2:
+ case GL_LUMINANCE_ALPHA:
+ case GL_LUMINANCE4_ALPHA4:
+ case GL_LUMINANCE6_ALPHA2:
+ case GL_LUMINANCE8_ALPHA8:
+ case GL_LUMINANCE12_ALPHA4:
+ case GL_LUMINANCE12_ALPHA12:
+ case GL_LUMINANCE16_ALPHA16:
+ case GL_COMPRESSED_LUMINANCE_ALPHA:
+ return _radeon_texformat_al88;
+
+ case GL_INTENSITY:
+ case GL_INTENSITY4:
+ case GL_INTENSITY8:
+ case GL_INTENSITY12:
+ case GL_INTENSITY16:
+ case GL_COMPRESSED_INTENSITY:
+ return MESA_FORMAT_I8;
+
+ case GL_YCBCR_MESA:
+ if (type == GL_UNSIGNED_SHORT_8_8_APPLE ||
+ type == GL_UNSIGNED_BYTE)
+ return MESA_FORMAT_YCBCR;
+ else
+ return MESA_FORMAT_YCBCR_REV;
+
+ case GL_RGB_S3TC:
+ case GL_RGB4_S3TC:
+ case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
+ return MESA_FORMAT_RGB_DXT1;
+
+ case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
+ return MESA_FORMAT_RGBA_DXT1;
+
+ case GL_RGBA_S3TC:
+ case GL_RGBA4_S3TC:
+ case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
+ return MESA_FORMAT_RGBA_DXT3;
+
+ case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
+ return MESA_FORMAT_RGBA_DXT5;
+
+ case GL_ALPHA16F_ARB:
+ return MESA_FORMAT_ALPHA_FLOAT16;
+ case GL_ALPHA32F_ARB:
+ return MESA_FORMAT_ALPHA_FLOAT32;
+ case GL_LUMINANCE16F_ARB:
+ return MESA_FORMAT_LUMINANCE_FLOAT16;
+ case GL_LUMINANCE32F_ARB:
+ return MESA_FORMAT_LUMINANCE_FLOAT32;
+ case GL_LUMINANCE_ALPHA16F_ARB:
+ return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT16;
+ case GL_LUMINANCE_ALPHA32F_ARB:
+ return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT32;
+ case GL_INTENSITY16F_ARB:
+ return MESA_FORMAT_INTENSITY_FLOAT16;
+ case GL_INTENSITY32F_ARB:
+ return MESA_FORMAT_INTENSITY_FLOAT32;
+ /* RGB float internal formats are carried as RGBA float here */
+ case GL_RGB16F_ARB:
+ return MESA_FORMAT_RGBA_FLOAT16;
+ case GL_RGB32F_ARB:
+ return MESA_FORMAT_RGBA_FLOAT32;
+ case GL_RGBA16F_ARB:
+ return MESA_FORMAT_RGBA_FLOAT16;
+ case GL_RGBA32F_ARB:
+ return MESA_FORMAT_RGBA_FLOAT32;
+
+ case GL_DEPTH_COMPONENT:
+ case GL_DEPTH_COMPONENT16:
+ case GL_DEPTH_COMPONENT24:
+ case GL_DEPTH_COMPONENT32:
+ case GL_DEPTH_STENCIL_EXT:
+ case GL_DEPTH24_STENCIL8_EXT:
+ return MESA_FORMAT_S8_Z24;
+
+ /* EXT_texture_sRGB */
+ case GL_SRGB:
+ case GL_SRGB8:
+ case GL_SRGB_ALPHA:
+ case GL_SRGB8_ALPHA8:
+ case GL_COMPRESSED_SRGB:
+ case GL_COMPRESSED_SRGB_ALPHA:
+ return MESA_FORMAT_SARGB8;
+
+ case GL_SLUMINANCE:
+ case GL_SLUMINANCE8:
+ case GL_COMPRESSED_SLUMINANCE:
+ return MESA_FORMAT_SL8;
+
+ case GL_SLUMINANCE_ALPHA:
+ case GL_SLUMINANCE8_ALPHA8:
+ case GL_COMPRESSED_SLUMINANCE_ALPHA:
+ return MESA_FORMAT_SLA8;
+
+ case GL_COMPRESSED_SRGB_S3TC_DXT1_EXT:
+ return MESA_FORMAT_SRGB_DXT1;
+ case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
+ return MESA_FORMAT_SRGBA_DXT1;
+ case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
+ return MESA_FORMAT_SRGBA_DXT3;
+ case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
+ return MESA_FORMAT_SRGBA_DXT5;
+
+ default:
+ _mesa_problem(ctx,
+ "unexpected internalFormat 0x%x in %s",
+ (int)internalFormat, __func__);
+ return MESA_FORMAT_NONE;
+ }
+
+ return MESA_FORMAT_NONE; /* never get here */
+}
+
+/** Assign a miptree to the texture image, creating a new miptree
+ * if the current one doesn't match the image.
+ */
+static void teximage_assign_miptree(radeonContextPtr rmesa,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ radeonTexObj *t = radeon_tex_obj(texObj);
+ radeon_texture_image* image = get_radeon_texture_image(texImage);
+
+ /* Try using current miptree, or create new if there isn't any */
+ if (!t->mt || !radeon_miptree_matches_image(t->mt, texImage)) {
+ radeon_miptree_unreference(&t->mt);
+ t->mt = radeon_miptree_create_for_teximage(rmesa,
+ texObj,
+ texImage);
+
+ radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
+ "%s: texObj %p, texImage %p, "
+ "texObj miptree doesn't match, allocated new miptree %p\n",
+ __func__, texObj, texImage, t->mt);
+ }
+
+ /* Miptree allocation may have failed when no image was
+ * specified for the base level */
+ if (t->mt) {
+ radeon_miptree_reference(t->mt, &image->mt);
+ } else
+ radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
+ "%s Failed to allocate miptree.\n", __func__);
+}
+
+unsigned radeonIsFormatRenderable(gl_format mesa_format)
+{
+ if (mesa_format == _radeon_texformat_argb8888 || mesa_format == _radeon_texformat_rgb565 ||
+ mesa_format == _radeon_texformat_argb1555 || mesa_format == _radeon_texformat_argb4444)
+ return 1;
+
+ switch (mesa_format)
+ {
+ case MESA_FORMAT_Z16:
+ case MESA_FORMAT_S8_Z24:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+#if FEATURE_OES_EGL_image
+void radeon_image_target_texture_2d(struct gl_context *ctx, GLenum target,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage,
+ GLeglImageOES image_handle)
+{
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
+ radeonTexObj *t = radeon_tex_obj(texObj);
+ radeon_texture_image *radeonImage = get_radeon_texture_image(texImage);
+ __DRIscreen *screen;
+ __DRIimage *image;
+
+ screen = radeon->dri.screen;
+ image = screen->dri2.image->lookupEGLImage(screen, image_handle,
+ screen->loaderPrivate);
+ if (image == NULL)
+ return;
+
+ radeonFreeTextureImageBuffer(ctx, texImage);
+
+ texImage->Width = image->width;
+ texImage->Height = image->height;
+ texImage->Depth = 1;
+ texImage->_BaseFormat = GL_RGBA;
+ texImage->TexFormat = image->format;
+ radeonImage->base.RowStride = image->pitch;
+ texImage->InternalFormat = image->internal_format;
+
+ if(t->mt)
+ {
+ radeon_miptree_unreference(&t->mt);
+ t->mt = NULL;
+ }
+
+ /* NOTE: The following is *very* ugly and will probably break. But
+ I don't know how to deal with it without creating a whole new
+ function like radeon_miptree_from_bo(), so I'm going with the
+ easy but error-prone way. */
+
+ radeon_try_alloc_miptree(radeon, t);
+
+ if (t->mt == NULL)
+ {
+ radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
+ "%s Failed to allocate miptree.\n", __func__);
+ return;
+ }
+
+ /* only take a reference once the allocation is known to have succeeded */
+ radeon_miptree_reference(t->mt, &radeonImage->mt);
+
+ /* Particularly ugly: this is guaranteed to break, if image->bo is
+ not of the required size for a miptree. */
+ radeon_bo_unref(t->mt->bo);
+ radeon_bo_ref(image->bo);
+ t->mt->bo = image->bo;
+
+ if (!radeon_miptree_matches_image(t->mt, &radeonImage->base.Base))
+ fprintf(stderr, "miptree doesn't match image\n");
+}
+#endif
+
+gl_format _radeon_texformat_rgba8888 = MESA_FORMAT_NONE;
+gl_format _radeon_texformat_argb8888 = MESA_FORMAT_NONE;
+gl_format _radeon_texformat_rgb565 = MESA_FORMAT_NONE;
+gl_format _radeon_texformat_argb4444 = MESA_FORMAT_NONE;
+gl_format _radeon_texformat_argb1555 = MESA_FORMAT_NONE;
+gl_format _radeon_texformat_al88 = MESA_FORMAT_NONE;
+/*@}*/
+
+
+static void
+radeonInitTextureFormats(void)
+{
+ if (_mesa_little_endian()) {
+ _radeon_texformat_rgba8888 = MESA_FORMAT_RGBA8888;
+ _radeon_texformat_argb8888 = MESA_FORMAT_ARGB8888;
+ _radeon_texformat_rgb565 = MESA_FORMAT_RGB565;
+ _radeon_texformat_argb4444 = MESA_FORMAT_ARGB4444;
+ _radeon_texformat_argb1555 = MESA_FORMAT_ARGB1555;
+ _radeon_texformat_al88 = MESA_FORMAT_AL88;
+ }
+ else {
+ _radeon_texformat_rgba8888 = MESA_FORMAT_RGBA8888_REV;
+ _radeon_texformat_argb8888 = MESA_FORMAT_ARGB8888_REV;
+ _radeon_texformat_rgb565 = MESA_FORMAT_RGB565_REV;
+ _radeon_texformat_argb4444 = MESA_FORMAT_ARGB4444_REV;
+ _radeon_texformat_argb1555 = MESA_FORMAT_ARGB1555_REV;
+ _radeon_texformat_al88 = MESA_FORMAT_AL88_REV;
+ }
+}
+
+void
+radeon_init_common_texture_funcs(radeonContextPtr radeon,
+ struct dd_function_table *functions)
+{
+ functions->NewTextureImage = radeonNewTextureImage;
+ functions->DeleteTextureImage = radeonDeleteTextureImage;
+ functions->AllocTextureImageBuffer = radeonAllocTextureImageBuffer;
+ functions->FreeTextureImageBuffer = radeonFreeTextureImageBuffer;
+ functions->MapTextureImage = radeon_map_texture_image;
+ functions->UnmapTextureImage = radeon_unmap_texture_image;
+
+ functions->ChooseTextureFormat = radeonChooseTextureFormat_mesa;
+
+ functions->CopyTexSubImage = radeonCopyTexSubImage;
+
+ functions->Bitmap = _mesa_meta_Bitmap;
+#if FEATURE_OES_EGL_image
+ functions->EGLImageTargetTexture2D = radeon_image_target_texture_2d;
+#endif
+
+ radeonInitTextureFormats();
+}
+
+static void
+radeon_swrast_map_image(radeonContextPtr rmesa,
+ radeon_texture_image *image)
+{
+ GLuint level, face;
+ radeon_mipmap_tree *mt;
+ GLuint texel_size;
+ radeon_mipmap_level *lvl;
+ int rs;
+
+ if (!image || !image->mt)
+ return;
+
+ texel_size = _mesa_get_format_bytes(image->base.Base.TexFormat);
+ level = image->base.Base.Level;
+ face = image->base.Base.Face;
+ mt = image->mt;
+
+ lvl = &image->mt->levels[level];
+
+ rs = lvl->rowstride / texel_size;
+
+ radeon_bo_map(mt->bo, 1);
+
+ image->base.Map = mt->bo->ptr + lvl->faces[face].offset;
+ if (mt->target == GL_TEXTURE_3D) {
+ int i;
+
+ for (i = 0; i < mt->levels[level].depth; i++)
+ image->base.ImageOffsets[i] = rs * lvl->height * i;
+ }
+ image->base.RowStride = rs;
+}
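+
+/*
+ * Offset sketch for the 3D branch above, with hypothetical numbers: a
+ * 512-byte rowstride on a 4-byte format gives rs = 128 texels, so slice
+ * i of a 64-row-high level starts ImageOffsets[i] = 128 * 64 * i texels
+ * past Map.
+ */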
+
+static void
+radeon_swrast_unmap_image(radeonContextPtr rmesa,
+ radeon_texture_image *image)
+{
+ if (image && image->mt) {
+ image->base.Map = NULL;
+ radeon_bo_unmap(image->mt->bo);
+ }
+}
+
+void
+radeon_swrast_map_texture_images(struct gl_context *ctx,
+ struct gl_texture_object *texObj)
+{
+ radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
+ GLuint nr_faces = _mesa_num_tex_faces(texObj->Target);
+ int i, face;
+
+ for (i = texObj->BaseLevel; i <= texObj->_MaxLevel; i++) {
+ for (face = 0; face < nr_faces; face++) {
+ radeon_texture_image *image = get_radeon_texture_image(texObj->Image[face][i]);
+ radeon_swrast_map_image(rmesa, image);
+ }
+ }
+}
+
+void
+radeon_swrast_unmap_texture_images(struct gl_context *ctx,
+ struct gl_texture_object *texObj)
+{
+ radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
+ GLuint nr_faces = _mesa_num_tex_faces(texObj->Target);
+ int i, face;
+
+ for (i = texObj->BaseLevel; i <= texObj->_MaxLevel; i++) {
+ for (face = 0; face < nr_faces; face++) {
+ radeon_texture_image *image = get_radeon_texture_image(texObj->Image[face][i]);
+ radeon_swrast_unmap_image(rmesa, image);
+ }
+ }
+}
+
+static radeon_mipmap_tree *radeon_miptree_create_for_teximage(radeonContextPtr rmesa,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ radeonTexObj *t = radeon_tex_obj(texObj);
+ GLuint firstLevel;
+ GLuint lastLevel;
+ int width, height, depth;
+ int i;
+
+ width = texImage->Width;
+ height = texImage->Height;
+ depth = texImage->Depth;
+
+ if (texImage->Level > texObj->BaseLevel &&
+ (width == 1 ||
+ (texObj->Target != GL_TEXTURE_1D && height == 1) ||
+ (texObj->Target == GL_TEXTURE_3D && depth == 1))) {
+ /* For this combination, we're at some lower mipmap level and
+ * some important dimension is 1. We can't extrapolate up to a
+ * likely base level width/height/depth for a full mipmap stack
+ * from this info, so just allocate this one level.
+ */
+ firstLevel = texImage->Level;
+ lastLevel = texImage->Level;
+ } else {
+ if (texImage->Level < texObj->BaseLevel)
+ firstLevel = 0;
+ else
+ firstLevel = texObj->BaseLevel;
+
+ for (i = texImage->Level; i > firstLevel; i--) {
+ width <<= 1;
+ if (height != 1)
+ height <<= 1;
+ if (depth != 1)
+ depth <<= 1;
+ }
+ if ((texObj->Sampler.MinFilter == GL_NEAREST ||
+ texObj->Sampler.MinFilter == GL_LINEAR) &&
+ texImage->Level == firstLevel) {
+ lastLevel = firstLevel;
+ } else {
+ lastLevel = firstLevel + _mesa_logbase2(MAX2(MAX2(width, height), depth));
+ }
+ }
+
+ return radeon_miptree_create(rmesa, texObj->Target,
+ texImage->TexFormat, firstLevel, lastLevel - firstLevel + 1,
+ width, height, depth,
+ t->tile_bits);
+}
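+
+/*
+ * Worked example of the extrapolation above (hypothetical upload): a
+ * 16x8 glTexImage2D at level 2 with BaseLevel 0 doubles both dimensions
+ * twice to a 64x32 base, so with a mipmapping min filter the tree spans
+ * levels 0 through 0 + log2(64) = 6. A GL_NEAREST/GL_LINEAR min filter
+ * combined with a base-level upload allocates just that one level
+ * instead.
+ */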
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_texture.h b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_texture.h
index 17fac3d5ea5..88aace840bb 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_texture.h
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_texture.h
@@ -1 +1,91 @@
-../radeon/radeon_texture.h \ No newline at end of file
+/*
+ * Copyright (C) 2008 Nicolai Haehnle.
+ * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef RADEON_TEXTURE_H
+#define RADEON_TEXTURE_H
+
+#include "main/formats.h"
+#include "main/mfeatures.h"
+
+extern gl_format _radeon_texformat_rgba8888;
+extern gl_format _radeon_texformat_argb8888;
+extern gl_format _radeon_texformat_rgb565;
+extern gl_format _radeon_texformat_argb4444;
+extern gl_format _radeon_texformat_argb1555;
+extern gl_format _radeon_texformat_al88;
+
+extern
+void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
+ GLuint numrows, GLuint rowsize);
+struct gl_texture_image *radeonNewTextureImage(struct gl_context *ctx);
+void radeonFreeTextureImageBuffer(struct gl_context *ctx, struct gl_texture_image *timage);
+
+void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable);
+void radeon_teximage_unmap(radeon_texture_image *image);
+int radeon_validate_texture_miptree(struct gl_context * ctx,
+ struct gl_sampler_object *samp,
+ struct gl_texture_object *texObj);
+
+
+void radeon_swrast_map_texture_images(struct gl_context *ctx, struct gl_texture_object *texObj);
+void radeon_swrast_unmap_texture_images(struct gl_context *ctx, struct gl_texture_object *texObj);
+
+gl_format radeonChooseTextureFormat_mesa(struct gl_context * ctx,
+ GLenum target,
+ GLint internalFormat,
+ GLenum format,
+ GLenum type);
+
+gl_format radeonChooseTextureFormat(struct gl_context * ctx,
+ GLint internalFormat,
+ GLenum format,
+ GLenum type, GLboolean fbo);
+
+void radeonCopyTexSubImage(struct gl_context *ctx, GLuint dims,
+ struct gl_texture_image *texImage,
+ GLint xoffset, GLint yoffset, GLint zoffset,
+ struct gl_renderbuffer *rb,
+ GLint x, GLint y,
+ GLsizei width, GLsizei height);
+
+unsigned radeonIsFormatRenderable(gl_format mesa_format);
+
+#if FEATURE_OES_EGL_image
+void radeon_image_target_texture_2d(struct gl_context *ctx, GLenum target,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage,
+ GLeglImageOES image_handle);
+#endif
+
+void
+radeon_init_common_texture_funcs(radeonContextPtr radeon,
+ struct dd_function_table *functions);
+
+#endif
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_tile.c b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_tile.c
index d4bfe27da64..403da110106 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_tile.c
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_tile.c
@@ -1 +1,512 @@
-../radeon/radeon_tile.c \ No newline at end of file
+/*
+ * Copyright (C) 2010 Maciej Cencora <m.cencora@gmail.com>
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "radeon_tile.h"
+
+#include <stdint.h>
+#include <string.h>
+
+#include "main/macros.h"
+#include "radeon_debug.h"
+
+#define MICRO_TILE_SIZE 32
+
+static void micro_tile_8_x_4_8bit(const void * const src, unsigned src_pitch,
+ void * const dst, unsigned dst_pitch,
+ unsigned width, unsigned height)
+{
+ unsigned row; /* current source row */
+ unsigned col; /* current source column */
+ unsigned k; /* number of processed tiles */
+ const unsigned tile_width = 8, tile_height = 4;
+ const unsigned tiles_in_row = (width + (tile_width - 1)) / tile_width;
+
+ k = 0;
+ for (row = 0; row < height; row += tile_height)
+ {
+ for (col = 0; col < width; col += tile_width, ++k)
+ {
+ uint8_t *src2 = (uint8_t *)src + src_pitch * row + col;
+ uint8_t *dst2 = (uint8_t *)dst + row * dst_pitch +
+ (k % tiles_in_row) * MICRO_TILE_SIZE / sizeof(uint8_t);
+ unsigned j;
+
+ for (j = 0; j < MIN2(tile_height, height - row); ++j)
+ {
+ unsigned columns = MIN2(tile_width, width - col);
+ memcpy(dst2, src2, columns * sizeof(uint8_t));
+ dst2 += tile_width;
+ src2 += src_pitch;
+ }
+ }
+ }
+}
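+
+/*
+ * Layout sketch for the 8x4 tiler above, using a hypothetical 20x10
+ * 8-bit surface: tiles_in_row = (20 + 7) / 8 = 3, so the tile covering
+ * source rows 4-7, columns 8-15 is the fifth one processed (k = 4) and
+ * its base lands at dst + 4 * dst_pitch + (4 % 3) * MICRO_TILE_SIZE,
+ * with rows inside the tile packed tile_width = 8 bytes apart. Edge
+ * tiles copy only the MIN2()-clamped rows and columns.
+ */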
+
+static void micro_tile_4_x_4_16bit(const void * const src, unsigned src_pitch,
+ void * const dst, unsigned dst_pitch,
+ unsigned width, unsigned height)
+{
+ unsigned row; /* current source row */
+ unsigned col; /* current source column */
+ unsigned k; /* number of processed tiles */
+ const unsigned tile_width = 4, tile_height = 4;
+ const unsigned tiles_in_row = (width + (tile_width - 1)) / tile_width;
+
+ k = 0;
+ for (row = 0; row < height; row += tile_height)
+ {
+ for (col = 0; col < width; col += tile_width, ++k)
+ {
+ uint16_t *src2 = (uint16_t *)src + src_pitch * row + col;
+ uint16_t *dst2 = (uint16_t *)dst + row * dst_pitch +
+ (k % tiles_in_row) * MICRO_TILE_SIZE / sizeof(uint16_t);
+ unsigned j;
+
+ for (j = 0; j < MIN2(tile_height, height - row); ++j)
+ {
+ unsigned columns = MIN2(tile_width, width - col);
+ memcpy(dst2, src2, columns * sizeof(uint16_t));
+ dst2 += tile_width;
+ src2 += src_pitch;
+ }
+ }
+ }
+}
+
+static void micro_tile_8_x_2_16bit(const void * const src, unsigned src_pitch,
+ void * const dst, unsigned dst_pitch,
+ unsigned width, unsigned height)
+{
+ unsigned row; /* current source row */
+ unsigned col; /* current source column */
+ unsigned k; /* number of processed tiles */
+ const unsigned tile_width = 8, tile_height = 2;
+ const unsigned tiles_in_row = (width + (tile_width - 1)) / tile_width;
+
+ k = 0;
+ for (row = 0; row < height; row += tile_height)
+ {
+ for (col = 0; col < width; col += tile_width, ++k)
+ {
+ uint16_t *src2 = (uint16_t *)src + src_pitch * row + col;
+ uint16_t *dst2 = (uint16_t *)dst + row * dst_pitch +
+ (k % tiles_in_row) * MICRO_TILE_SIZE / sizeof(uint16_t);
+ unsigned j;
+
+ for (j = 0; j < MIN2(tile_height, height - row); ++j)
+ {
+ unsigned columns = MIN2(tile_width, width - col);
+ memcpy(dst2, src2, columns * sizeof(uint16_t));
+ dst2 += tile_width;
+ src2 += src_pitch;
+ }
+ }
+ }
+}
+
+static void micro_tile_4_x_2_32bit(const void * const src, unsigned src_pitch,
+ void * const dst, unsigned dst_pitch,
+ unsigned width, unsigned height)
+{
+ unsigned row; /* current source row */
+ unsigned col; /* current source column */
+ unsigned k; /* number of processed tiles */
+ const unsigned tile_width = 4, tile_height = 2;
+ const unsigned tiles_in_row = (width + (tile_width - 1)) / tile_width;
+
+ k = 0;
+ for (row = 0; row < height; row += tile_height)
+ {
+ for (col = 0; col < width; col += tile_width, ++k)
+ {
+ uint32_t *src2 = (uint32_t *)src + src_pitch * row + col;
+ uint32_t *dst2 = (uint32_t *)dst + row * dst_pitch +
+ (k % tiles_in_row) * MICRO_TILE_SIZE / sizeof(uint32_t);
+ unsigned j;
+
+ for (j = 0; j < MIN2(tile_height, height - row); ++j)
+ {
+ unsigned columns = MIN2(tile_width, width - col);
+ memcpy(dst2, src2, columns * sizeof(uint32_t));
+ dst2 += tile_width;
+ src2 += src_pitch;
+ }
+ }
+ }
+}
+
+static void micro_tile_2_x_2_64bit(const void * const src, unsigned src_pitch,
+ void * const dst, unsigned dst_pitch,
+ unsigned width, unsigned height)
+{
+ unsigned row; /* current source row */
+ unsigned col; /* current source column */
+ unsigned k; /* number of processed tiles */
+ const unsigned tile_width = 2, tile_height = 2;
+ const unsigned tiles_in_row = (width + (tile_width - 1)) / tile_width;
+
+ k = 0;
+ for (row = 0; row < height; row += tile_height)
+ {
+ for (col = 0; col < width; col += tile_width, ++k)
+ {
+ uint64_t *src2 = (uint64_t *)src + src_pitch * row + col;
+ uint64_t *dst2 = (uint64_t *)dst + row * dst_pitch +
+ (k % tiles_in_row) * MICRO_TILE_SIZE / sizeof(uint64_t);
+ unsigned j;
+
+ for (j = 0; j < MIN2(tile_height, height - row); ++j)
+ {
+ unsigned columns = MIN2(tile_width, width - col);
+ memcpy(dst2, src2, columns * sizeof(uint64_t));
+ dst2 += tile_width;
+ src2 += src_pitch;
+ }
+ }
+ }
+}
+
+static void micro_tile_1_x_1_128bit(const void * src, unsigned src_pitch,
+ void * dst, unsigned dst_pitch,
+ unsigned width, unsigned height)
+{
+ unsigned j;
+ const unsigned elem_size = 16; /* sizeof(uint128_t) */
+
+ /* For 128-bit texels the tile is 1x1, so tiling degenerates to a
+ * row-by-row copy; the old inner per-column loop advanced a full
+ * pitch per column and overran both surfaces. */
+ for (j = 0; j < height; ++j)
+ {
+ memcpy(dst, src, width * elem_size);
+ dst = (uint8_t *)dst + dst_pitch * elem_size;
+ src = (const uint8_t *)src + src_pitch * elem_size;
+ }
+}
+
+void tile_image(const void * src, unsigned src_pitch,
+ void *dst, unsigned dst_pitch,
+ gl_format format, unsigned width, unsigned height)
+{
+ assert(src_pitch >= width);
+ assert(dst_pitch >= width);
+
+ radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+ "Software tiling: src_pitch %d, dst_pitch %d, width %d, height %d, bpp %d\n",
+ src_pitch, dst_pitch, width, height, _mesa_get_format_bytes(format));
+
+ switch (_mesa_get_format_bytes(format))
+ {
+ case 16:
+ micro_tile_1_x_1_128bit(src, src_pitch, dst, dst_pitch, width, height);
+ break;
+ case 8:
+ micro_tile_2_x_2_64bit(src, src_pitch, dst, dst_pitch, width, height);
+ break;
+ case 4:
+ micro_tile_4_x_2_32bit(src, src_pitch, dst, dst_pitch, width, height);
+ break;
+ case 2:
+ if (_mesa_get_format_bits(format, GL_DEPTH_BITS))
+ {
+ micro_tile_4_x_4_16bit(src, src_pitch, dst, dst_pitch, width, height);
+ }
+ else
+ {
+ micro_tile_8_x_2_16bit(src, src_pitch, dst, dst_pitch, width, height);
+ }
+ break;
+ case 1:
+ micro_tile_8_x_4_8bit(src, src_pitch, dst, dst_pitch, width, height);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+}
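+
+/*
+ * Minimal usage sketch, with hypothetical buffers: tiling one 64x16
+ * MESA_FORMAT_ARGB8888 level from a linear staging buffer dispatches to
+ * micro_tile_4_x_2_32bit:
+ *
+ *   tile_image(linear, 64, tiled, 64, MESA_FORMAT_ARGB8888, 64, 16);
+ *
+ * Both pitches are in texels here, as the asserts against width imply.
+ */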
+
+static void micro_untile_8_x_4_8bit(const void * const src, unsigned src_pitch,
+ void * const dst, unsigned dst_pitch,
+ unsigned width, unsigned height)
+{
+ unsigned row; /* current destination row */
+ unsigned col; /* current destination column */
+ unsigned k; /* current tile number */
+ const unsigned tile_width = 8, tile_height = 4;
+ const unsigned tiles_in_row = (width + (tile_width - 1)) / tile_width;
+
+ assert(src_pitch % tile_width == 0);
+
+ k = 0;
+ for (row = 0; row < height; row += tile_height)
+ {
+ for (col = 0; col < width; col += tile_width, ++k)
+ {
+ uint8_t *src2 = (uint8_t *)src + row * src_pitch +
+ (k % tiles_in_row) * MICRO_TILE_SIZE / sizeof(uint8_t);
+ uint8_t *dst2 = (uint8_t *)dst + dst_pitch * row + col;
+ unsigned j;
+
+ for (j = 0; j < MIN2(tile_height, height - row); ++j)
+ {
+ unsigned columns = MIN2(tile_width, width - col);
+ memcpy(dst2, src2, columns * sizeof(uint8_t));
+ dst2 += dst_pitch;
+ src2 += tile_width;
+ }
+ }
+ }
+}
+
+static void micro_untile_8_x_2_16bit(const void * const src, unsigned src_pitch,
+ void * const dst, unsigned dst_pitch,
+ unsigned width, unsigned height)
+{
+ unsigned row; /* current destination row */
+ unsigned col; /* current destination column */
+ unsigned k; /* current tile number */
+ const unsigned tile_width = 8, tile_height = 2;
+ const unsigned tiles_in_row = (width + (tile_width - 1)) / tile_width;
+
+ assert(src_pitch % tile_width == 0);
+
+ k = 0;
+ for (row = 0; row < height; row += tile_height)
+ {
+ for (col = 0; col < width; col += tile_width, ++k)
+ {
+ uint16_t *src2 = (uint16_t *)src + row * src_pitch +
+ (k % tiles_in_row) * MICRO_TILE_SIZE / sizeof(uint16_t);
+ uint16_t *dst2 = (uint16_t *)dst + dst_pitch * row + col;
+ unsigned j;
+
+ for (j = 0; j < MIN2(tile_height, height - row); ++j)
+ {
+ unsigned columns = MIN2(tile_width, width - col);
+ memcpy(dst2, src2, columns * sizeof(uint16_t));
+ dst2 += dst_pitch;
+ src2 += tile_width;
+ }
+ }
+ }
+}
+
+static void micro_untile_4_x_4_16bit(const void * const src, unsigned src_pitch,
+ void * const dst, unsigned dst_pitch,
+ unsigned width, unsigned height)
+{
+ unsigned row; /* current destination row */
+ unsigned col; /* current destination column */
+ unsigned k; /* current tile number */
+ const unsigned tile_width = 4, tile_height = 4;
+ const unsigned tiles_in_row = (width + (tile_width - 1)) / tile_width;
+
+ assert(src_pitch % tile_width == 0);
+
+ k = 0;
+ for (row = 0; row < height; row += tile_height)
+ {
+ for (col = 0; col < width; col += tile_width, ++k)
+ {
+ uint16_t *src2 = (uint16_t *)src + row * src_pitch +
+ (k % tiles_in_row) * MICRO_TILE_SIZE / sizeof(uint16_t);
+ uint16_t *dst2 = (uint16_t *)dst + dst_pitch * row + col;
+ unsigned j;
+
+ for (j = 0; j < MIN2(tile_height, height - row); ++j)
+ {
+ unsigned columns = MIN2(tile_width, width - col);
+ memcpy(dst2, src2, columns * sizeof(uint16_t));
+ dst2 += dst_pitch;
+ src2 += tile_width;
+ }
+ }
+ }
+}
+
+static void micro_untile_4_x_2_32bit(const void * const src, unsigned src_pitch,
+ void * const dst, unsigned dst_pitch,
+ unsigned width, unsigned height)
+{
+ unsigned row; /* current destination row */
+ unsigned col; /* current destination column */
+ unsigned k; /* current tile number */
+ const unsigned tile_width = 4, tile_height = 2;
+ const unsigned tiles_in_row = (width + (tile_width - 1)) / tile_width;
+
+ assert(src_pitch % tile_width == 0);
+
+ k = 0;
+ for (row = 0; row < height; row += tile_height)
+ {
+ for (col = 0; col < width; col += tile_width, ++k)
+ {
+ uint32_t *src2 = (uint32_t *)src + row * src_pitch +
+ (k % tiles_in_row) * MICRO_TILE_SIZE / sizeof(uint32_t);
+ uint32_t *dst2 = (uint32_t *)dst + dst_pitch * row + col;
+ unsigned j;
+
+ for (j = 0; j < MIN2(tile_height, height - row); ++j)
+ {
+ unsigned columns = MIN2(tile_width, width - col);
+ memcpy(dst2, src2, columns * sizeof(uint32_t));
+ dst2 += dst_pitch;
+ src2 += tile_width;
+ }
+ }
+ }
+}
+
+static void micro_untile_2_x_2_64bit(const void * const src, unsigned src_pitch,
+ void * const dst, unsigned dst_pitch,
+ unsigned width, unsigned height)
+{
+ unsigned row; /* current destination row */
+ unsigned col; /* current destination column */
+ unsigned k; /* current tile number */
+ const unsigned tile_width = 2, tile_height = 2;
+ const unsigned tiles_in_row = (width + (tile_width - 1)) / tile_width;
+
+ assert(src_pitch % tile_width == 0);
+
+ k = 0;
+ for (row = 0; row < height; row += tile_height)
+ {
+ for (col = 0; col < width; col += tile_width, ++k)
+ {
+ uint64_t *src2 = (uint64_t *)src + row * src_pitch +
+ (k % tiles_in_row) * MICRO_TILE_SIZE / sizeof(uint64_t);
+ uint64_t *dst2 = (uint64_t *)dst + dst_pitch * row + col;
+ unsigned j;
+
+ for (j = 0; j < MIN2(tile_height, height - row); ++j)
+ {
+ unsigned columns = MIN2(tile_width, width - col);
+ memcpy(dst2, src2, columns * sizeof(uint64_t));
+ dst2 += dst_pitch;
+ src2 += tile_width;
+ }
+ }
+ }
+}
+
+static void micro_untile_1_x_1_128bit(const void * src, unsigned src_pitch,
+ void * dst, unsigned dst_pitch,
+ unsigned width, unsigned height)
+{
+ unsigned j;
+ const unsigned elem_size = 16; /* sizeof(uint128_t) */
+
+ /* 1x1 128-bit tiles untile as a plain row-by-row copy; the old inner
+ * per-column loop advanced a full pitch per column and overran both
+ * surfaces. */
+ for (j = 0; j < height; ++j)
+ {
+ memcpy(dst, src, width * elem_size);
+ dst = (uint8_t *)dst + dst_pitch * elem_size;
+ src = (const uint8_t *)src + src_pitch * elem_size;
+ }
+}
+
+void untile_image(const void * src, unsigned src_pitch,
+ void *dst, unsigned dst_pitch,
+ gl_format format, unsigned width, unsigned height)
+{
+ assert(src_pitch >= width);
+ assert(dst_pitch >= width);
+
+ radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+ "Software untiling: src_pitch %d, dst_pitch %d, width %d, height %d, bpp %d\n",
+ src_pitch, dst_pitch, width, height, _mesa_get_format_bytes(format));
+
+ switch (_mesa_get_format_bytes(format))
+ {
+ case 16:
+ micro_untile_1_x_1_128bit(src, src_pitch, dst, dst_pitch, width, height);
+ break;
+ case 8:
+ micro_untile_2_x_2_64bit(src, src_pitch, dst, dst_pitch, width, height);
+ break;
+ case 4:
+ micro_untile_4_x_2_32bit(src, src_pitch, dst, dst_pitch, width, height);
+ break;
+ case 2:
+ if (_mesa_get_format_bits(format, GL_DEPTH_BITS))
+ {
+ micro_untile_4_x_4_16bit(src, src_pitch, dst, dst_pitch, width, height);
+ }
+ else
+ {
+ micro_untile_8_x_2_16bit(src, src_pitch, dst, dst_pitch, width, height);
+ }
+ break;
+ case 1:
+ micro_untile_8_x_4_8bit(src, src_pitch, dst, dst_pitch, width, height);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+}
+
+void get_tile_size(gl_format format, unsigned *block_width, unsigned *block_height)
+{
+ switch (_mesa_get_format_bytes(format))
+ {
+ case 16:
+ *block_width = 1;
+ *block_height = 1;
+ break;
+ case 8:
+ *block_width = 2;
+ *block_height = 2;
+ break;
+ case 4:
+ *block_width = 4;
+ *block_height = 2;
+ break;
+ case 2:
+ if (_mesa_get_format_bits(format, GL_DEPTH_BITS))
+ {
+ *block_width = 4;
+ *block_height = 4;
+ }
+ else
+ {
+ *block_width = 8;
+ *block_height = 2;
+ }
+ break;
+ case 1:
+ *block_width = 8;
+ *block_height = 4;
+ break;
+ default:
+ assert(0);
+ break;
+ }
+}
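+
+/*
+ * Example pairing with tile_image() above: a 2-byte depth format such as
+ * MESA_FORMAT_Z16 reports a 4x4 block, matching the
+ * micro_tile_4_x_4_16bit path, so a caller would round a level's width
+ * and height up to multiples of 4 before sizing a tiled allocation.
+ */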
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_tile.h b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_tile.h
index 31074c581ea..31d9c5611c3 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_tile.h
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/radeon_tile.h
@@ -1 +1,38 @@
-../radeon/radeon_tile.h \ No newline at end of file
+/*
+ * Copyright (C) 2010 Maciej Cencora <m.cencora@gmail.com>
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "main/formats.h"
+
+void tile_image(const void * src, unsigned src_pitch,
+ void *dst, unsigned dst_pitch,
+ gl_format format, unsigned width, unsigned height);
+
+void untile_image(const void * src, unsigned src_pitch,
+ void *dst, unsigned dst_pitch,
+ gl_format format, unsigned width, unsigned height);
+
+void get_tile_size(gl_format format, unsigned *block_width, unsigned *block_height);
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/server/radeon_dri.h b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/server/radeon_dri.h
index 27c591d3c9d..dc513721075 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/server/radeon_dri.h
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/server/radeon_dri.h
@@ -1 +1,115 @@
-../../radeon/server/radeon_dri.h \ No newline at end of file
+/**
+ * \file server/radeon_dri.h
+ * \brief Radeon server-side structures.
+ *
+ * \author Kevin E. Martin <martin@xfree86.org>
+ * \author Rickard E. Faith <faith@valinux.com>
+ */
+
+/*
+ * Copyright 2000 ATI Technologies Inc., Markham, Ontario,
+ * VA Linux Systems Inc., Fremont, California.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL ATI, VA LINUX SYSTEMS AND/OR
+ * THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _RADEON_DRI_
+#define _RADEON_DRI_
+
+#include "xf86drm.h"
+#include "drm.h"
+#include "radeon_drm.h"
+
+/* DRI Driver defaults */
+#define RADEON_DEFAULT_CP_PIO_MODE RADEON_CSQ_PRIPIO_INDPIO
+#define RADEON_DEFAULT_CP_BM_MODE RADEON_CSQ_PRIBM_INDBM
+#define RADEON_DEFAULT_AGP_MODE 1
+#define RADEON_DEFAULT_AGP_FAST_WRITE 0
+#define RADEON_DEFAULT_AGP_SIZE 8 /* MB (must be 2^n and > 4MB) */
+#define RADEON_DEFAULT_RING_SIZE 1 /* MB (must be page aligned) */
+#define RADEON_DEFAULT_BUFFER_SIZE 2 /* MB (must be page aligned) */
+#define RADEON_DEFAULT_AGP_TEX_SIZE 1 /* MB (must be page aligned) */
+#define RADEON_DEFAULT_CP_TIMEOUT 10000 /* usecs */
+#define RADEON_DEFAULT_PAGE_FLIP 0 /* page flipping disabled */
+#define RADEON_BUFFER_ALIGN 0x00000fff
+
+/**
+ * \brief Radeon DRI driver private data.
+ */
+typedef struct {
+ /**
+ * \name DRI screen private data
+ */
+ /*@{*/
+ int deviceID; /**< \brief PCI device ID */
+ int width; /**< \brief width in pixels of display */
+ int height; /**< \brief height in scanlines of display */
+ int depth; /**< \brief depth of display (8, 15, 16, 24) */
+ int bpp; /**< \brief bit depth of display (8, 16, 24, 32) */
+
+ int IsPCI; /**< \brief is current card a PCI card? */
+ int AGPMode; /**< \brief AGP mode */
+
+ int frontOffset; /**< \brief front buffer offset */
+ int frontPitch; /**< \brief front buffer pitch */
+ int backOffset; /**< \brief shared back buffer offset */
+ int backPitch; /**< \brief shared back buffer pitch */
+ int depthOffset; /**< \brief shared depth buffer offset */
+ int depthPitch; /**< \brief shared depth buffer pitch */
+ int textureOffset; /**< \brief start of texture data in frame buffer */
+   int textureSize;           /**< \brief size of texture data */
+ int log2TexGran; /**< \brief log2 texture granularity */
+ /*@}*/
+
+ /**
+ * \name MMIO register data
+ */
+ /*@{*/
+   drm_handle_t registerHandle;  /**< \brief MMIO register map handle */
+   drmSize registerSize;         /**< \brief MMIO register map size */
+ /*@}*/
+
+ /**
+ * \name CP in-memory status information
+ */
+ /*@{*/
+ drm_handle_t statusHandle; /**< \brief status map handle */
+ drmSize statusSize; /**< \brief status map size */
+ /*@}*/
+
+ /**
+ * \name CP AGP Texture data
+ */
+ /*@{*/
+ drm_handle_t gartTexHandle; /**< \brief AGP texture area map handle */
+ drmSize gartTexMapSize; /**< \brief AGP texture area map size */
+ int log2GARTTexGran; /**< \brief AGP texture granularity in log base 2 */
+ int gartTexOffset; /**< \brief AGP texture area offset in AGP space */
+ /*@}*/
+
+   unsigned int sarea_priv_offset; /**< \brief offset of the private SAREA data */
+} RADEONDRIRec, *RADEONDRIPtr;
+
+#endif
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/server/radeon_macros.h b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/server/radeon_macros.h
index c56cd735b83..355262c9bab 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/server/radeon_macros.h
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/server/radeon_macros.h
@@ -1 +1,155 @@
-../../radeon/server/radeon_macros.h \ No newline at end of file
+/**
+ * \file server/radeon_macros.h
+ * \brief Macros for Radeon MMIO operation.
+ *
+ * \authors Kevin E. Martin <martin@xfree86.org>
+ * \authors Rickard E. Faith <faith@valinux.com>
+ * \authors Alan Hourihane <alanh@fairlite.demon.co.uk>
+ */
+
+/*
+ * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and
+ * VA Linux Systems Inc., Fremont, California.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL ATI, VA LINUX SYSTEMS AND/OR
+ * THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _RADEON_MACROS_H_
+#define _RADEON_MACROS_H_
+
+#include <mmio.h>
+
+# define MMIO_IN8(base, offset) \
+ *(volatile unsigned char *)(((unsigned char*)(base)) + (offset))
+# define MMIO_IN32(base, offset) \
+ read_MMIO_LE32(base, offset)
+# define MMIO_OUT8(base, offset, val) \
+ *(volatile unsigned char *)(((unsigned char*)(base)) + (offset)) = (val)
+# define MMIO_OUT32(base, offset, val) \
+ *(volatile unsigned int *)(void *)(((unsigned char*)(base)) + (offset)) = CPU_TO_LE32(val)
+
+
+ /* Memory mapped register access macros */
+#define INREG8(addr) MMIO_IN8(RADEONMMIO, addr)
+#define INREG(addr) MMIO_IN32(RADEONMMIO, addr)
+#define OUTREG8(addr, val) MMIO_OUT8(RADEONMMIO, addr, val)
+#define OUTREG(addr, val) MMIO_OUT32(RADEONMMIO, addr, val)
+
+#define ADDRREG(addr) ((volatile GLuint *)(pointer)(RADEONMMIO + (addr)))
+
+
+#define OUTREGP(addr, val, mask) \
+do { \
+ GLuint tmp = INREG(addr); \
+ tmp &= (mask); \
+ tmp |= (val); \
+ OUTREG(addr, tmp); \
+} while (0)
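+
+/* Illustrative use of OUTREGP (not part of the original header): set the
+ * bus-master-disable bit with a read-modify-write that preserves every
+ * other RADEON_BUS_CNTL bit (register defined in radeon_reg.h):
+ *
+ *   OUTREGP(RADEON_BUS_CNTL, RADEON_BUS_MASTER_DIS,
+ *           ~RADEON_BUS_MASTER_DIS);
+ */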
+
+#define INPLL(dpy, addr) RADEONINPLL(dpy, addr)
+
+#define OUTPLL(addr, val) \
+do { \
+ OUTREG8(RADEON_CLOCK_CNTL_INDEX, (((addr) & 0x3f) | \
+ RADEON_PLL_WR_EN)); \
+ OUTREG(RADEON_CLOCK_CNTL_DATA, val); \
+} while (0)
+
+#define OUTPLLP(dpy, addr, val, mask) \
+do { \
+ GLuint tmp = INPLL(dpy, addr); \
+ tmp &= (mask); \
+ tmp |= (val); \
+ OUTPLL(addr, tmp); \
+} while (0)
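+
+/* Illustrative use of the PLL macros (not part of the original header):
+ * program the PPLL reference divider while leaving the remaining bits
+ * untouched; pScrn and ref_div are caller-supplied, and RADEONINPLL()
+ * is assumed to be provided by the driver, as in the X server:
+ *
+ *   OUTPLLP(pScrn, RADEON_PPLL_REF_DIV, ref_div,
+ *           ~RADEON_PPLL_REF_DIV_MASK);
+ */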
+
+#define OUTPAL_START(idx) \
+do { \
+ OUTREG8(RADEON_PALETTE_INDEX, (idx)); \
+} while (0)
+
+#define OUTPAL_NEXT(r, g, b) \
+do { \
+ OUTREG(RADEON_PALETTE_DATA, ((r) << 16) | ((g) << 8) | (b)); \
+} while (0)
+
+#define OUTPAL_NEXT_CARD32(v) \
+do { \
+ OUTREG(RADEON_PALETTE_DATA, (v & 0x00ffffff)); \
+} while (0)
+
+#define OUTPAL(idx, r, g, b) \
+do { \
+ OUTPAL_START((idx)); \
+ OUTPAL_NEXT((r), (g), (b)); \
+} while (0)
+
+#define INPAL_START(idx) \
+do { \
+ OUTREG(RADEON_PALETTE_INDEX, (idx) << 16); \
+} while (0)
+
+#define INPAL_NEXT() INREG(RADEON_PALETTE_DATA)
+
+#define PAL_SELECT(idx) \
+do { \
+ if (!idx) { \
+ OUTREG(RADEON_DAC_CNTL2, INREG(RADEON_DAC_CNTL2) & \
+ (GLuint)~RADEON_DAC2_PALETTE_ACC_CTL); \
+ } else { \
+ OUTREG(RADEON_DAC_CNTL2, INREG(RADEON_DAC_CNTL2) | \
+ RADEON_DAC2_PALETTE_ACC_CTL); \
+ } \
+} while (0)
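+
+/* Illustrative palette usage (not part of the original header): load a
+ * grayscale ramp into the primary DAC palette, assuming the hardware
+ * auto-increments the palette index after each data write:
+ *
+ *   PAL_SELECT(0);
+ *   OUTPAL_START(0);
+ *   for (i = 0; i < 256; i++)
+ *       OUTPAL_NEXT(i, i, i);
+ */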
+
+
+#endif
diff --git a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/server/radeon_reg.h b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/server/radeon_reg.h
index e2349dcb685..7ba5b38c4cb 120000..100644
--- a/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/server/radeon_reg.h
+++ b/chromium/third_party/mesa/src/src/mesa/drivers/dri/r200/server/radeon_reg.h
@@ -1 +1,2194 @@
-../../radeon/server/radeon_reg.h \ No newline at end of file
+/*
+ * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and
+ * VA Linux Systems Inc., Fremont, California.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL ATI, VA LINUX SYSTEMS AND/OR
+ * THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Authors:
+ * Kevin E. Martin <martin@xfree86.org>
+ * Rickard E. Faith <faith@valinux.com>
+ * Alan Hourihane <alanh@fairlite.demon.co.uk>
+ *
+ * References:
+ *
+ * !!!! FIXME !!!!
+ * RAGE 128 VR/ RAGE 128 GL Register Reference Manual (Technical
+ * Reference Manual P/N RRG-G04100-C Rev. 0.04), ATI Technologies: April
+ * 1999.
+ *
+ * !!!! FIXME !!!!
+ * RAGE 128 Software Development Manual (Technical Reference Manual P/N
+ * SDK-G04000 Rev. 0.01), ATI Technologies: June 1999.
+ *
+ */
+
+/* !!!! FIXME !!!! NOTE: THIS FILE HAS BEEN CONVERTED FROM r128_reg.h
+ * AND CONTAINS REGISTERS AND REGISTER DEFINITIONS THAT ARE NOT CORRECT
+ * ON THE RADEON. A FULL AUDIT OF THIS CODE IS NEEDED! */
+
+#ifndef _RADEON_REG_H_
+#define _RADEON_REG_H_
+
+ /* Registers for 2D/Video/Overlay */
+#define RADEON_ADAPTER_ID 0x0f2c /* PCI */
+#define RADEON_AGP_BASE 0x0170
+#define RADEON_AGP_CNTL 0x0174
+# define RADEON_AGP_APER_SIZE_256MB (0x00 << 0)
+# define RADEON_AGP_APER_SIZE_128MB (0x20 << 0)
+# define RADEON_AGP_APER_SIZE_64MB (0x30 << 0)
+# define RADEON_AGP_APER_SIZE_32MB (0x38 << 0)
+# define RADEON_AGP_APER_SIZE_16MB (0x3c << 0)
+# define RADEON_AGP_APER_SIZE_8MB (0x3e << 0)
+# define RADEON_AGP_APER_SIZE_4MB (0x3f << 0)
+# define RADEON_AGP_APER_SIZE_MASK (0x3f << 0)
+#define RADEON_AGP_COMMAND 0x0f60 /* PCI */
+#define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config*/
+# define RADEON_AGP_ENABLE (1<<8)
+#define RADEON_AGP_PLL_CNTL 0x000b /* PLL */
+#define RADEON_AGP_STATUS 0x0f5c /* PCI */
+# define RADEON_AGP_1X_MODE 0x01
+# define RADEON_AGP_2X_MODE 0x02
+# define RADEON_AGP_4X_MODE 0x04
+# define RADEON_AGP_FW_MODE 0x10
+# define RADEON_AGP_MODE_MASK 0x17
+#define RADEON_ATTRDR 0x03c1 /* VGA */
+#define RADEON_ATTRDW 0x03c0 /* VGA */
+#define RADEON_ATTRX 0x03c0 /* VGA */
+#define RADEON_AUX_SC_CNTL 0x1660
+# define RADEON_AUX1_SC_EN (1 << 0)
+# define RADEON_AUX1_SC_MODE_OR (0 << 1)
+# define RADEON_AUX1_SC_MODE_NAND (1 << 1)
+# define RADEON_AUX2_SC_EN (1 << 2)
+# define RADEON_AUX2_SC_MODE_OR (0 << 3)
+# define RADEON_AUX2_SC_MODE_NAND (1 << 3)
+# define RADEON_AUX3_SC_EN (1 << 4)
+# define RADEON_AUX3_SC_MODE_OR (0 << 5)
+# define RADEON_AUX3_SC_MODE_NAND (1 << 5)
+#define RADEON_AUX1_SC_BOTTOM 0x1670
+#define RADEON_AUX1_SC_LEFT 0x1664
+#define RADEON_AUX1_SC_RIGHT 0x1668
+#define RADEON_AUX1_SC_TOP 0x166c
+#define RADEON_AUX2_SC_BOTTOM 0x1680
+#define RADEON_AUX2_SC_LEFT 0x1674
+#define RADEON_AUX2_SC_RIGHT 0x1678
+#define RADEON_AUX2_SC_TOP 0x167c
+#define RADEON_AUX3_SC_BOTTOM 0x1690
+#define RADEON_AUX3_SC_LEFT 0x1684
+#define RADEON_AUX3_SC_RIGHT 0x1688
+#define RADEON_AUX3_SC_TOP 0x168c
+#define RADEON_AUX_WINDOW_HORZ_CNTL 0x02d8
+#define RADEON_AUX_WINDOW_VERT_CNTL 0x02dc
+
+#define RADEON_BASE_CODE 0x0f0b
+#define RADEON_BIOS_0_SCRATCH 0x0010
+#define RADEON_BIOS_1_SCRATCH 0x0014
+#define RADEON_BIOS_2_SCRATCH 0x0018
+#define RADEON_BIOS_3_SCRATCH 0x001c
+#define RADEON_BIOS_4_SCRATCH 0x0020
+#define RADEON_BIOS_5_SCRATCH 0x0024
+#define RADEON_BIOS_6_SCRATCH 0x0028
+#define RADEON_BIOS_7_SCRATCH 0x002c
+#define RADEON_BIOS_ROM 0x0f30 /* PCI */
+#define RADEON_BIST 0x0f0f /* PCI */
+#define RADEON_BRUSH_DATA0 0x1480
+#define RADEON_BRUSH_DATA1 0x1484
+#define RADEON_BRUSH_DATA10 0x14a8
+#define RADEON_BRUSH_DATA11 0x14ac
+#define RADEON_BRUSH_DATA12 0x14b0
+#define RADEON_BRUSH_DATA13 0x14b4
+#define RADEON_BRUSH_DATA14 0x14b8
+#define RADEON_BRUSH_DATA15 0x14bc
+#define RADEON_BRUSH_DATA16 0x14c0
+#define RADEON_BRUSH_DATA17 0x14c4
+#define RADEON_BRUSH_DATA18 0x14c8
+#define RADEON_BRUSH_DATA19 0x14cc
+#define RADEON_BRUSH_DATA2 0x1488
+#define RADEON_BRUSH_DATA20 0x14d0
+#define RADEON_BRUSH_DATA21 0x14d4
+#define RADEON_BRUSH_DATA22 0x14d8
+#define RADEON_BRUSH_DATA23 0x14dc
+#define RADEON_BRUSH_DATA24 0x14e0
+#define RADEON_BRUSH_DATA25 0x14e4
+#define RADEON_BRUSH_DATA26 0x14e8
+#define RADEON_BRUSH_DATA27 0x14ec
+#define RADEON_BRUSH_DATA28 0x14f0
+#define RADEON_BRUSH_DATA29 0x14f4
+#define RADEON_BRUSH_DATA3 0x148c
+#define RADEON_BRUSH_DATA30 0x14f8
+#define RADEON_BRUSH_DATA31 0x14fc
+#define RADEON_BRUSH_DATA32 0x1500
+#define RADEON_BRUSH_DATA33 0x1504
+#define RADEON_BRUSH_DATA34 0x1508
+#define RADEON_BRUSH_DATA35 0x150c
+#define RADEON_BRUSH_DATA36 0x1510
+#define RADEON_BRUSH_DATA37 0x1514
+#define RADEON_BRUSH_DATA38 0x1518
+#define RADEON_BRUSH_DATA39 0x151c
+#define RADEON_BRUSH_DATA4 0x1490
+#define RADEON_BRUSH_DATA40 0x1520
+#define RADEON_BRUSH_DATA41 0x1524
+#define RADEON_BRUSH_DATA42 0x1528
+#define RADEON_BRUSH_DATA43 0x152c
+#define RADEON_BRUSH_DATA44 0x1530
+#define RADEON_BRUSH_DATA45 0x1534
+#define RADEON_BRUSH_DATA46 0x1538
+#define RADEON_BRUSH_DATA47 0x153c
+#define RADEON_BRUSH_DATA48 0x1540
+#define RADEON_BRUSH_DATA49 0x1544
+#define RADEON_BRUSH_DATA5 0x1494
+#define RADEON_BRUSH_DATA50 0x1548
+#define RADEON_BRUSH_DATA51 0x154c
+#define RADEON_BRUSH_DATA52 0x1550
+#define RADEON_BRUSH_DATA53 0x1554
+#define RADEON_BRUSH_DATA54 0x1558
+#define RADEON_BRUSH_DATA55 0x155c
+#define RADEON_BRUSH_DATA56 0x1560
+#define RADEON_BRUSH_DATA57 0x1564
+#define RADEON_BRUSH_DATA58 0x1568
+#define RADEON_BRUSH_DATA59 0x156c
+#define RADEON_BRUSH_DATA6 0x1498
+#define RADEON_BRUSH_DATA60 0x1570
+#define RADEON_BRUSH_DATA61 0x1574
+#define RADEON_BRUSH_DATA62 0x1578
+#define RADEON_BRUSH_DATA63 0x157c
+#define RADEON_BRUSH_DATA7 0x149c
+#define RADEON_BRUSH_DATA8 0x14a0
+#define RADEON_BRUSH_DATA9 0x14a4
+#define RADEON_BRUSH_SCALE 0x1470
+#define RADEON_BRUSH_Y_X 0x1474
+#define RADEON_BUS_CNTL 0x0030
+# define RADEON_BUS_MASTER_DIS (1 << 6)
+# define RADEON_BUS_RD_DISCARD_EN (1 << 24)
+# define RADEON_BUS_RD_ABORT_EN (1 << 25)
+# define RADEON_BUS_MSTR_DISCONNECT_EN (1 << 28)
+# define RADEON_BUS_WRT_BURST (1 << 29)
+# define RADEON_BUS_READ_BURST (1 << 30)
+#define RADEON_BUS_CNTL1 0x0034
+# define RADEON_BUS_WAIT_ON_LOCK_EN (1 << 4)
+
+#define RADEON_CACHE_CNTL 0x1724
+#define RADEON_CACHE_LINE 0x0f0c /* PCI */
+#define RADEON_CAP0_TRIG_CNTL 0x0950 /* ? */
+#define RADEON_CAP1_TRIG_CNTL 0x09c0 /* ? */
+#define RADEON_CAPABILITIES_ID 0x0f50 /* PCI */
+#define RADEON_CAPABILITIES_PTR 0x0f34 /* PCI */
+#define RADEON_CLK_PIN_CNTL 0x0001 /* PLL */
+#define RADEON_CLOCK_CNTL_DATA 0x000c
+#define RADEON_CLOCK_CNTL_INDEX 0x0008
+# define RADEON_PLL_WR_EN (1 << 7)
+# define RADEON_PLL_DIV_SEL (3 << 8)
+# define RADEON_PLL2_DIV_SEL_MASK ~(3 << 8)
+#define RADEON_CLR_CMP_CLR_3D 0x1a24
+#define RADEON_CLR_CMP_CLR_DST 0x15c8
+#define RADEON_CLR_CMP_CLR_SRC 0x15c4
+#define RADEON_CLR_CMP_CNTL 0x15c0
+# define RADEON_SRC_CMP_EQ_COLOR (4 << 0)
+# define RADEON_SRC_CMP_NEQ_COLOR (5 << 0)
+# define RADEON_CLR_CMP_SRC_SOURCE (1 << 24)
+#define RADEON_CLR_CMP_MASK 0x15cc
+# define RADEON_CLR_CMP_MSK 0xffffffff
+#define RADEON_CLR_CMP_MASK_3D 0x1A28
+#define RADEON_COMMAND 0x0f04 /* PCI */
+#define RADEON_COMPOSITE_SHADOW_ID 0x1a0c
+#define RADEON_CONFIG_APER_0_BASE 0x0100
+#define RADEON_CONFIG_APER_1_BASE 0x0104
+#define RADEON_CONFIG_APER_SIZE 0x0108
+#define RADEON_CONFIG_BONDS 0x00e8
+#define RADEON_CONFIG_CNTL 0x00e0
+# define RADEON_CFG_ATI_REV_A11 (0 << 16)
+# define RADEON_CFG_ATI_REV_A12 (1 << 16)
+# define RADEON_CFG_ATI_REV_A13 (2 << 16)
+# define RADEON_CFG_ATI_REV_ID_MASK (0xf << 16)
+#define RADEON_CONFIG_MEMSIZE 0x00f8
+#define RADEON_CONFIG_MEMSIZE_EMBEDDED 0x0114
+#define RADEON_CONFIG_REG_1_BASE 0x010c
+#define RADEON_CONFIG_REG_APER_SIZE 0x0110
+#define RADEON_CONFIG_XSTRAP 0x00e4
+#define RADEON_CONSTANT_COLOR_C 0x1d34
+# define RADEON_CONSTANT_COLOR_MASK 0x00ffffff
+# define RADEON_CONSTANT_COLOR_ONE 0x00ffffff
+# define RADEON_CONSTANT_COLOR_ZERO 0x00000000
+#define RADEON_CRC_CMDFIFO_ADDR 0x0740
+#define RADEON_CRC_CMDFIFO_DOUT 0x0744
+#define RADEON_GRPH_BUFFER_CNTL 0x02f0
+# define RADEON_GRPH_START_REQ_MASK (0x7f)
+# define RADEON_GRPH_START_REQ_SHIFT 0
+# define RADEON_GRPH_STOP_REQ_MASK (0x7f<<8)
+# define RADEON_GRPH_STOP_REQ_SHIFT 8
+# define RADEON_GRPH_CRITICAL_POINT_MASK (0x7f<<16)
+# define RADEON_GRPH_CRITICAL_POINT_SHIFT 16
+# define RADEON_GRPH_CRITICAL_CNTL (1<<28)
+# define RADEON_GRPH_BUFFER_SIZE (1<<29)
+# define RADEON_GRPH_CRITICAL_AT_SOF (1<<30)
+# define RADEON_GRPH_STOP_CNTL (1<<31)
+#define RADEON_GRPH2_BUFFER_CNTL 0x03f0
+# define RADEON_GRPH2_START_REQ_MASK (0x7f)
+# define RADEON_GRPH2_START_REQ_SHIFT 0
+# define RADEON_GRPH2_STOP_REQ_MASK (0x7f<<8)
+# define RADEON_GRPH2_STOP_REQ_SHIFT 8
+# define RADEON_GRPH2_CRITICAL_POINT_MASK (0x7f<<16)
+# define RADEON_GRPH2_CRITICAL_POINT_SHIFT 16
+# define RADEON_GRPH2_CRITICAL_CNTL (1<<28)
+# define RADEON_GRPH2_BUFFER_SIZE (1<<29)
+# define RADEON_GRPH2_CRITICAL_AT_SOF (1<<30)
+# define RADEON_GRPH2_STOP_CNTL (1<<31)
+#define RADEON_CRTC_CRNT_FRAME 0x0214
+#define RADEON_CRTC_EXT_CNTL 0x0054
+# define RADEON_CRTC_VGA_XOVERSCAN (1 << 0)
+# define RADEON_VGA_ATI_LINEAR (1 << 3)
+# define RADEON_XCRT_CNT_EN (1 << 6)
+# define RADEON_CRTC_HSYNC_DIS (1 << 8)
+# define RADEON_CRTC_VSYNC_DIS (1 << 9)
+# define RADEON_CRTC_DISPLAY_DIS (1 << 10)
+# define RADEON_CRTC_SYNC_TRISTAT (1 << 11)
+# define RADEON_CRTC_CRT_ON (1 << 15)
+#define RADEON_CRTC_EXT_CNTL_DPMS_BYTE 0x0055
+# define RADEON_CRTC_HSYNC_DIS_BYTE (1 << 0)
+# define RADEON_CRTC_VSYNC_DIS_BYTE (1 << 1)
+# define RADEON_CRTC_DISPLAY_DIS_BYTE (1 << 2)
+#define RADEON_CRTC_GEN_CNTL 0x0050
+# define RADEON_CRTC_DBL_SCAN_EN (1 << 0)
+# define RADEON_CRTC_INTERLACE_EN (1 << 1)
+# define RADEON_CRTC_CSYNC_EN (1 << 4)
+# define RADEON_CRTC_CUR_EN (1 << 16)
+# define RADEON_CRTC_CUR_MODE_MASK (7 << 17)
+# define RADEON_CRTC_ICON_EN (1 << 20)
+# define RADEON_CRTC_EXT_DISP_EN (1 << 24)
+# define RADEON_CRTC_EN (1 << 25)
+# define RADEON_CRTC_DISP_REQ_EN_B (1 << 26)
+#define RADEON_CRTC2_GEN_CNTL 0x03f8
+# define RADEON_CRTC2_DBL_SCAN_EN (1 << 0)
+# define RADEON_CRTC2_INTERLACE_EN (1 << 1)
+# define RADEON_CRTC2_SYNC_TRISTAT (1 << 4)
+# define RADEON_CRTC2_HSYNC_TRISTAT (1 << 5)
+# define RADEON_CRTC2_VSYNC_TRISTAT (1 << 6)
+# define RADEON_CRTC2_CRT2_ON (1 << 7)
+# define RADEON_CRTC2_ICON_EN (1 << 15)
+# define RADEON_CRTC2_CUR_EN (1 << 16)
+# define RADEON_CRTC2_CUR_MODE_MASK (7 << 20)
+# define RADEON_CRTC2_DISP_DIS (1 << 23)
+# define RADEON_CRTC2_EN (1 << 25)
+# define RADEON_CRTC2_DISP_REQ_EN_B (1 << 26)
+# define RADEON_CRTC2_CSYNC_EN (1 << 27)
+# define RADEON_CRTC2_HSYNC_DIS (1 << 28)
+# define RADEON_CRTC2_VSYNC_DIS (1 << 29)
+#define RADEON_CRTC_MORE_CNTL 0x27c
+# define RADEON_CRTC_H_CUTOFF_ACTIVE_EN (1<<4)
+# define RADEON_CRTC_V_CUTOFF_ACTIVE_EN (1<<5)
+#define RADEON_CRTC_GUI_TRIG_VLINE 0x0218
+#define RADEON_CRTC_H_SYNC_STRT_WID 0x0204
+# define RADEON_CRTC_H_SYNC_STRT_PIX (0x07 << 0)
+# define RADEON_CRTC_H_SYNC_STRT_CHAR (0x3ff << 3)
+# define RADEON_CRTC_H_SYNC_STRT_CHAR_SHIFT 3
+# define RADEON_CRTC_H_SYNC_WID (0x3f << 16)
+# define RADEON_CRTC_H_SYNC_WID_SHIFT 16
+# define RADEON_CRTC_H_SYNC_POL (1 << 23)
+#define RADEON_CRTC2_H_SYNC_STRT_WID 0x0304
+# define RADEON_CRTC2_H_SYNC_STRT_PIX (0x07 << 0)
+# define RADEON_CRTC2_H_SYNC_STRT_CHAR (0x3ff << 3)
+# define RADEON_CRTC2_H_SYNC_STRT_CHAR_SHIFT 3
+# define RADEON_CRTC2_H_SYNC_WID (0x3f << 16)
+# define RADEON_CRTC2_H_SYNC_WID_SHIFT 16
+# define RADEON_CRTC2_H_SYNC_POL (1 << 23)
+#define RADEON_CRTC_H_TOTAL_DISP 0x0200
+# define RADEON_CRTC_H_TOTAL (0x03ff << 0)
+# define RADEON_CRTC_H_TOTAL_SHIFT 0
+# define RADEON_CRTC_H_DISP (0x01ff << 16)
+# define RADEON_CRTC_H_DISP_SHIFT 16
+#define RADEON_CRTC2_H_TOTAL_DISP 0x0300
+# define RADEON_CRTC2_H_TOTAL (0x03ff << 0)
+# define RADEON_CRTC2_H_TOTAL_SHIFT 0
+# define RADEON_CRTC2_H_DISP (0x01ff << 16)
+# define RADEON_CRTC2_H_DISP_SHIFT 16
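+/* Illustrative only (not part of the original header): both fields are
+ * programmed in character clocks (8 pixels) minus one, so mode-setting
+ * code composes the register roughly as:
+ *
+ *   OUTREG(RADEON_CRTC_H_TOTAL_DISP,
+ *          (((mode->CrtcHTotal / 8) - 1) << RADEON_CRTC_H_TOTAL_SHIFT) |
+ *          (((mode->CrtcHDisplay / 8) - 1) << RADEON_CRTC_H_DISP_SHIFT));
+ */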
+#define RADEON_CRTC_OFFSET 0x0224
+#define RADEON_CRTC2_OFFSET 0x0324
+#define RADEON_CRTC_OFFSET_CNTL 0x0228
+# define RADEON_CRTC_TILE_EN (1 << 15)
+#define RADEON_CRTC2_OFFSET_CNTL 0x0328
+# define RADEON_CRTC2_TILE_EN (1 << 15)
+#define RADEON_CRTC_PITCH 0x022c
+#define RADEON_CRTC2_PITCH 0x032c
+#define RADEON_CRTC_STATUS 0x005c
+# define RADEON_CRTC_VBLANK_SAVE (1 << 1)
+# define RADEON_CRTC_VBLANK_SAVE_CLEAR (1 << 1)
+#define RADEON_CRTC2_STATUS 0x03fc
+# define RADEON_CRTC2_VBLANK_SAVE (1 << 1)
+# define RADEON_CRTC2_VBLANK_SAVE_CLEAR (1 << 1)
+#define RADEON_CRTC_V_SYNC_STRT_WID 0x020c
+# define RADEON_CRTC_V_SYNC_STRT (0x7ff << 0)
+# define RADEON_CRTC_V_SYNC_STRT_SHIFT 0
+# define RADEON_CRTC_V_SYNC_WID (0x1f << 16)
+# define RADEON_CRTC_V_SYNC_WID_SHIFT 16
+# define RADEON_CRTC_V_SYNC_POL (1 << 23)
+#define RADEON_CRTC2_V_SYNC_STRT_WID 0x030c
+# define RADEON_CRTC2_V_SYNC_STRT (0x7ff << 0)
+# define RADEON_CRTC2_V_SYNC_STRT_SHIFT 0
+# define RADEON_CRTC2_V_SYNC_WID (0x1f << 16)
+# define RADEON_CRTC2_V_SYNC_WID_SHIFT 16
+# define RADEON_CRTC2_V_SYNC_POL (1 << 23)
+#define RADEON_CRTC_V_TOTAL_DISP 0x0208
+# define RADEON_CRTC_V_TOTAL (0x07ff << 0)
+# define RADEON_CRTC_V_TOTAL_SHIFT 0
+# define RADEON_CRTC_V_DISP (0x07ff << 16)
+# define RADEON_CRTC_V_DISP_SHIFT 16
+#define RADEON_CRTC2_V_TOTAL_DISP 0x0308
+# define RADEON_CRTC2_V_TOTAL (0x07ff << 0)
+# define RADEON_CRTC2_V_TOTAL_SHIFT 0
+# define RADEON_CRTC2_V_DISP (0x07ff << 16)
+# define RADEON_CRTC2_V_DISP_SHIFT 16
+#define RADEON_CRTC_VLINE_CRNT_VLINE 0x0210
+# define RADEON_CRTC_CRNT_VLINE_MASK (0x7ff << 16)
+#define RADEON_CRTC2_CRNT_FRAME 0x0314
+#define RADEON_CRTC2_GUI_TRIG_VLINE 0x0318
+#define RADEON_CRTC2_STATUS 0x03fc
+#define RADEON_CRTC2_VLINE_CRNT_VLINE 0x0310
+#define RADEON_CRTC8_DATA 0x03d5 /* VGA, 0x3b5 */
+#define RADEON_CRTC8_IDX 0x03d4 /* VGA, 0x3b4 */
+#define RADEON_CUR_CLR0 0x026c
+#define RADEON_CUR_CLR1 0x0270
+#define RADEON_CUR_HORZ_VERT_OFF 0x0268
+#define RADEON_CUR_HORZ_VERT_POSN 0x0264
+#define RADEON_CUR_OFFSET 0x0260
+# define RADEON_CUR_LOCK (1 << 31)
+#define RADEON_CUR2_CLR0 0x036c
+#define RADEON_CUR2_CLR1 0x0370
+#define RADEON_CUR2_HORZ_VERT_OFF 0x0368
+#define RADEON_CUR2_HORZ_VERT_POSN 0x0364
+#define RADEON_CUR2_OFFSET 0x0360
+# define RADEON_CUR2_LOCK (1 << 31)
+
+#define RADEON_DAC_CNTL 0x0058
+# define RADEON_DAC_RANGE_CNTL (3 << 0)
+# define RADEON_DAC_RANGE_CNTL_MASK 0x03
+# define RADEON_DAC_BLANKING (1 << 2)
+# define RADEON_DAC_CMP_EN (1 << 3)
+# define RADEON_DAC_CMP_OUTPUT (1 << 7)
+# define RADEON_DAC_8BIT_EN (1 << 8)
+# define RADEON_DAC_VGA_ADR_EN (1 << 13)
+# define RADEON_DAC_PDWN (1 << 15)
+# define RADEON_DAC_MASK_ALL (0xff << 24)
+#define RADEON_DAC_CNTL2 0x007c
+# define RADEON_DAC2_DAC_CLK_SEL (1 << 0)
+# define RADEON_DAC2_DAC2_CLK_SEL (1 << 1)
+# define RADEON_DAC2_PALETTE_ACC_CTL (1 << 5)
+#define RADEON_DAC_EXT_CNTL 0x0280
+# define RADEON_DAC_FORCE_BLANK_OFF_EN (1 << 4)
+# define RADEON_DAC_FORCE_DATA_EN (1 << 5)
+# define RADEON_DAC_FORCE_DATA_SEL_MASK (3 << 6)
+# define RADEON_DAC_FORCE_DATA_MASK 0x0003ff00
+# define RADEON_DAC_FORCE_DATA_SHIFT 8
+#define RADEON_TV_DAC_CNTL 0x088c
+# define RADEON_TV_DAC_STD_MASK 0x0300
+# define RADEON_TV_DAC_RDACPD (1 << 24)
+# define RADEON_TV_DAC_GDACPD (1 << 25)
+# define RADEON_TV_DAC_BDACPD (1 << 26)
+#define RADEON_DISP_HW_DEBUG 0x0d14
+# define RADEON_CRT2_DISP1_SEL (1 << 5)
+#define RADEON_DISP_OUTPUT_CNTL 0x0d64
+# define RADEON_DISP_DAC_SOURCE_MASK 0x03
+# define RADEON_DISP_DAC2_SOURCE_MASK 0x0c
+# define RADEON_DISP_DAC_SOURCE_CRTC2 0x01
+# define RADEON_DISP_DAC2_SOURCE_CRTC2 0x04
+#define RADEON_DAC_CRC_SIG 0x02cc
+#define RADEON_DAC_DATA 0x03c9 /* VGA */
+#define RADEON_DAC_MASK 0x03c6 /* VGA */
+#define RADEON_DAC_R_INDEX 0x03c7 /* VGA */
+#define RADEON_DAC_W_INDEX 0x03c8 /* VGA */
+#define RADEON_DDA_CONFIG 0x02e0
+#define RADEON_DDA_ON_OFF 0x02e4
+#define RADEON_DEFAULT_OFFSET 0x16e0
+#define RADEON_DEFAULT_PITCH 0x16e4
+#define RADEON_DEFAULT_SC_BOTTOM_RIGHT 0x16e8
+# define RADEON_DEFAULT_SC_RIGHT_MAX (0x1fff << 0)
+# define RADEON_DEFAULT_SC_BOTTOM_MAX (0x1fff << 16)
+#define RADEON_DESTINATION_3D_CLR_CMP_VAL 0x1820
+#define RADEON_DESTINATION_3D_CLR_CMP_MSK 0x1824
+#define RADEON_DEVICE_ID 0x0f02 /* PCI */
+#define RADEON_DISP_MISC_CNTL 0x0d00
+# define RADEON_SOFT_RESET_GRPH_PP (1 << 0)
+#define RADEON_DISP_MERGE_CNTL 0x0d60
+# define RADEON_DISP_ALPHA_MODE_MASK 0x03
+# define RADEON_DISP_ALPHA_MODE_KEY 0
+# define RADEON_DISP_ALPHA_MODE_PER_PIXEL 1
+# define RADEON_DISP_ALPHA_MODE_GLOBAL 2
+# define RADEON_DISP_RGB_OFFSET_EN (1<<8)
+# define RADEON_DISP_GRPH_ALPHA_MASK (0xff << 16)
+# define RADEON_DISP_OV0_ALPHA_MASK (0xff << 24)
+# define RADEON_DISP_LIN_TRANS_BYPASS (0x01 << 9)
+#define RADEON_DISP2_MERGE_CNTL 0x0d68
+# define RADEON_DISP2_RGB_OFFSET_EN (1<<8)
+#define RADEON_DISP_LIN_TRANS_GRPH_A 0x0d80
+#define RADEON_DISP_LIN_TRANS_GRPH_B 0x0d84
+#define RADEON_DISP_LIN_TRANS_GRPH_C 0x0d88
+#define RADEON_DISP_LIN_TRANS_GRPH_D 0x0d8c
+#define RADEON_DISP_LIN_TRANS_GRPH_E 0x0d90
+#define RADEON_DISP_LIN_TRANS_GRPH_F 0x0d98
+#define RADEON_DP_BRUSH_BKGD_CLR 0x1478
+#define RADEON_DP_BRUSH_FRGD_CLR 0x147c
+#define RADEON_DP_CNTL 0x16c0
+# define RADEON_DST_X_LEFT_TO_RIGHT (1 << 0)
+# define RADEON_DST_Y_TOP_TO_BOTTOM (1 << 1)
+#define RADEON_DP_CNTL_XDIR_YDIR_YMAJOR 0x16d0
+# define RADEON_DST_Y_MAJOR (1 << 2)
+# define RADEON_DST_Y_DIR_TOP_TO_BOTTOM (1 << 15)
+# define RADEON_DST_X_DIR_LEFT_TO_RIGHT (1 << 31)
+#define RADEON_DP_DATATYPE 0x16c4
+# define RADEON_HOST_BIG_ENDIAN_EN (1 << 29)
+#define RADEON_DP_GUI_MASTER_CNTL 0x146c
+# define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0)
+# define RADEON_GMC_DST_PITCH_OFFSET_CNTL (1 << 1)
+# define RADEON_GMC_SRC_CLIPPING (1 << 2)
+# define RADEON_GMC_DST_CLIPPING (1 << 3)
+# define RADEON_GMC_BRUSH_DATATYPE_MASK (0x0f << 4)
+# define RADEON_GMC_BRUSH_8X8_MONO_FG_BG (0 << 4)
+# define RADEON_GMC_BRUSH_8X8_MONO_FG_LA (1 << 4)
+# define RADEON_GMC_BRUSH_1X8_MONO_FG_BG (4 << 4)
+# define RADEON_GMC_BRUSH_1X8_MONO_FG_LA (5 << 4)
+# define RADEON_GMC_BRUSH_32x1_MONO_FG_BG (6 << 4)
+# define RADEON_GMC_BRUSH_32x1_MONO_FG_LA (7 << 4)
+# define RADEON_GMC_BRUSH_32x32_MONO_FG_BG (8 << 4)
+# define RADEON_GMC_BRUSH_32x32_MONO_FG_LA (9 << 4)
+# define RADEON_GMC_BRUSH_8x8_COLOR (10 << 4)
+# define RADEON_GMC_BRUSH_1X8_COLOR (12 << 4)
+# define RADEON_GMC_BRUSH_SOLID_COLOR (13 << 4)
+# define RADEON_GMC_BRUSH_NONE (15 << 4)
+# define RADEON_GMC_DST_8BPP_CI (2 << 8)
+# define RADEON_GMC_DST_15BPP (3 << 8)
+# define RADEON_GMC_DST_16BPP (4 << 8)
+# define RADEON_GMC_DST_24BPP (5 << 8)
+# define RADEON_GMC_DST_32BPP (6 << 8)
+# define RADEON_GMC_DST_8BPP_RGB (7 << 8)
+# define RADEON_GMC_DST_Y8 (8 << 8)
+# define RADEON_GMC_DST_RGB8 (9 << 8)
+# define RADEON_GMC_DST_VYUY (11 << 8)
+# define RADEON_GMC_DST_YVYU (12 << 8)
+# define RADEON_GMC_DST_AYUV444 (14 << 8)
+# define RADEON_GMC_DST_ARGB4444 (15 << 8)
+# define RADEON_GMC_DST_DATATYPE_MASK (0x0f << 8)
+# define RADEON_GMC_DST_DATATYPE_SHIFT 8
+# define RADEON_GMC_SRC_DATATYPE_MASK (3 << 12)
+# define RADEON_GMC_SRC_DATATYPE_MONO_FG_BG (0 << 12)
+# define RADEON_GMC_SRC_DATATYPE_MONO_FG_LA (1 << 12)
+# define RADEON_GMC_SRC_DATATYPE_COLOR (3 << 12)
+# define RADEON_GMC_BYTE_PIX_ORDER (1 << 14)
+# define RADEON_GMC_BYTE_MSB_TO_LSB (0 << 14)
+# define RADEON_GMC_BYTE_LSB_TO_MSB (1 << 14)
+# define RADEON_GMC_CONVERSION_TEMP (1 << 15)
+# define RADEON_GMC_CONVERSION_TEMP_6500 (0 << 15)
+# define RADEON_GMC_CONVERSION_TEMP_9300 (1 << 15)
+# define RADEON_GMC_ROP3_MASK (0xff << 16)
+# define RADEON_DP_SRC_SOURCE_MASK (7 << 24)
+# define RADEON_DP_SRC_SOURCE_MEMORY (2 << 24)
+# define RADEON_DP_SRC_SOURCE_HOST_DATA (3 << 24)
+# define RADEON_GMC_3D_FCN_EN (1 << 27)
+# define RADEON_GMC_CLR_CMP_CNTL_DIS (1 << 28)
+# define RADEON_GMC_AUX_CLIP_DIS (1 << 29)
+# define RADEON_GMC_WR_MSK_DIS (1 << 30)
+# define RADEON_GMC_LD_BRUSH_Y_X (1 << 31)
+# define RADEON_ROP3_ZERO 0x00000000
+# define RADEON_ROP3_DSa 0x00880000
+# define RADEON_ROP3_SDna 0x00440000
+# define RADEON_ROP3_S 0x00cc0000
+# define RADEON_ROP3_DSna 0x00220000
+# define RADEON_ROP3_D 0x00aa0000
+# define RADEON_ROP3_DSx 0x00660000
+# define RADEON_ROP3_DSo 0x00ee0000
+# define RADEON_ROP3_DSon 0x00110000
+# define RADEON_ROP3_DSxn 0x00990000
+# define RADEON_ROP3_Dn 0x00550000
+# define RADEON_ROP3_SDno 0x00dd0000
+# define RADEON_ROP3_Sn 0x00330000
+# define RADEON_ROP3_DSno 0x00bb0000
+# define RADEON_ROP3_DSan 0x00770000
+# define RADEON_ROP3_ONE 0x00ff0000
+# define RADEON_ROP3_DPa 0x00a00000
+# define RADEON_ROP3_PDna 0x00500000
+# define RADEON_ROP3_P 0x00f00000
+# define RADEON_ROP3_DPna 0x000a0000
+# define RADEON_ROP3_D 0x00aa0000
+# define RADEON_ROP3_DPx 0x005a0000
+# define RADEON_ROP3_DPo 0x00fa0000
+# define RADEON_ROP3_DPon 0x00050000
+# define RADEON_ROP3_PDxn 0x00a50000
+# define RADEON_ROP3_PDno 0x00f50000
+# define RADEON_ROP3_Pn 0x000f0000
+# define RADEON_ROP3_DPno 0x00af0000
+# define RADEON_ROP3_DPan 0x005f0000
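+/* Illustrative only (not part of the original header): a typical
+ * DP_GUI_MASTER_CNTL value for a 32 bpp solid-color fill, in the style
+ * of the 2D acceleration code:
+ *
+ *   gmc = RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+ *         RADEON_GMC_BRUSH_SOLID_COLOR |
+ *         RADEON_GMC_DST_32BPP |
+ *         RADEON_GMC_SRC_DATATYPE_COLOR |
+ *         RADEON_ROP3_P |
+ *         RADEON_GMC_CLR_CMP_CNTL_DIS;
+ */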
+#define RADEON_DP_GUI_MASTER_CNTL_C 0x1c84
+#define RADEON_DP_MIX 0x16c8
+#define RADEON_DP_SRC_BKGD_CLR 0x15dc
+#define RADEON_DP_SRC_FRGD_CLR 0x15d8
+#define RADEON_DP_WRITE_MASK 0x16cc
+#define RADEON_DST_BRES_DEC 0x1630
+#define RADEON_DST_BRES_ERR 0x1628
+#define RADEON_DST_BRES_INC 0x162c
+#define RADEON_DST_BRES_LNTH 0x1634
+#define RADEON_DST_BRES_LNTH_SUB 0x1638
+#define RADEON_DST_HEIGHT 0x1410
+#define RADEON_DST_HEIGHT_WIDTH 0x143c
+#define RADEON_DST_HEIGHT_WIDTH_8 0x158c
+#define RADEON_DST_HEIGHT_WIDTH_BW 0x15b4
+#define RADEON_DST_HEIGHT_Y 0x15a0
+#define RADEON_DST_LINE_START 0x1600
+#define RADEON_DST_LINE_END 0x1604
+#define RADEON_DST_LINE_PATCOUNT 0x1608
+# define RADEON_BRES_CNTL_SHIFT 8
+#define RADEON_DST_OFFSET 0x1404
+#define RADEON_DST_PITCH 0x1408
+#define RADEON_DST_PITCH_OFFSET 0x142c
+#define RADEON_DST_PITCH_OFFSET_C 0x1c80
+# define RADEON_PITCH_SHIFT 21
+# define RADEON_DST_TILE_LINEAR (0 << 30)
+# define RADEON_DST_TILE_MACRO (1 << 30)
+# define RADEON_DST_TILE_MICRO (2 << 30)
+# define RADEON_DST_TILE_BOTH (3 << 30)
+#define RADEON_DST_WIDTH 0x140c
+#define RADEON_DST_WIDTH_HEIGHT 0x1598
+#define RADEON_DST_WIDTH_X 0x1588
+#define RADEON_DST_WIDTH_X_INCY 0x159c
+#define RADEON_DST_X 0x141c
+#define RADEON_DST_X_SUB 0x15a4
+#define RADEON_DST_X_Y 0x1594
+#define RADEON_DST_Y 0x1420
+#define RADEON_DST_Y_SUB 0x15a8
+#define RADEON_DST_Y_X 0x1438
+
+#define RADEON_FCP_CNTL 0x0910
+# define RADEON_FCP0_SRC_PCICLK 0
+# define RADEON_FCP0_SRC_PCLK 1
+# define RADEON_FCP0_SRC_PCLKb 2
+# define RADEON_FCP0_SRC_HREF 3
+# define RADEON_FCP0_SRC_GND 4
+# define RADEON_FCP0_SRC_HREFb 5
+#define RADEON_FLUSH_1 0x1704
+#define RADEON_FLUSH_2 0x1708
+#define RADEON_FLUSH_3 0x170c
+#define RADEON_FLUSH_4 0x1710
+#define RADEON_FLUSH_5 0x1714
+#define RADEON_FLUSH_6 0x1718
+#define RADEON_FLUSH_7 0x171c
+#define RADEON_FOG_3D_TABLE_START 0x1810
+#define RADEON_FOG_3D_TABLE_END 0x1814
+#define RADEON_FOG_3D_TABLE_DENSITY 0x181c
+#define RADEON_FOG_TABLE_INDEX 0x1a14
+#define RADEON_FOG_TABLE_DATA 0x1a18
+#define RADEON_FP_CRTC_H_TOTAL_DISP 0x0250
+#define RADEON_FP_CRTC_V_TOTAL_DISP 0x0254
+#define RADEON_FP_CRTC2_H_TOTAL_DISP 0x0350
+#define RADEON_FP_CRTC2_V_TOTAL_DISP 0x0354
+# define RADEON_FP_CRTC_H_TOTAL_MASK 0x000003ff
+# define RADEON_FP_CRTC_H_DISP_MASK 0x01ff0000
+# define RADEON_FP_CRTC_V_TOTAL_MASK 0x00000fff
+# define RADEON_FP_CRTC_V_DISP_MASK 0x0fff0000
+# define RADEON_FP_H_SYNC_STRT_CHAR_MASK 0x00001ff8
+# define RADEON_FP_H_SYNC_WID_MASK 0x003f0000
+# define RADEON_FP_V_SYNC_STRT_MASK 0x00000fff
+# define RADEON_FP_V_SYNC_WID_MASK 0x001f0000
+# define RADEON_FP_CRTC_H_TOTAL_SHIFT 0x00000000
+# define RADEON_FP_CRTC_H_DISP_SHIFT 0x00000010
+# define RADEON_FP_CRTC_V_TOTAL_SHIFT 0x00000000
+# define RADEON_FP_CRTC_V_DISP_SHIFT 0x00000010
+# define RADEON_FP_H_SYNC_STRT_CHAR_SHIFT 0x00000003
+# define RADEON_FP_H_SYNC_WID_SHIFT 0x00000010
+# define RADEON_FP_V_SYNC_STRT_SHIFT 0x00000000
+# define RADEON_FP_V_SYNC_WID_SHIFT 0x00000010
+#define RADEON_FP_GEN_CNTL 0x0284
+# define RADEON_FP_FPON (1 << 0)
+# define RADEON_FP_TMDS_EN (1 << 2)
+# define RADEON_FP_PANEL_FORMAT (1 << 3)
+# define RADEON_FP_EN_TMDS (1 << 7)
+# define RADEON_FP_DETECT_SENSE (1 << 8)
+# define RADEON_FP_SEL_CRTC2 (1 << 13)
+# define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15)
+# define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16)
+# define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17)
+# define RADEON_FP_CRTC_USE_SHADOW_VEND (1 << 18)
+# define RADEON_FP_RMX_HVSYNC_CONTROL_EN (1 << 20)
+# define RADEON_FP_DFP_SYNC_SEL (1 << 21)
+# define RADEON_FP_CRTC_LOCK_8DOT (1 << 22)
+# define RADEON_FP_CRT_SYNC_SEL (1 << 23)
+# define RADEON_FP_USE_SHADOW_EN (1 << 24)
+# define RADEON_FP_CRT_SYNC_ALT (1 << 26)
+#define RADEON_FP2_GEN_CNTL 0x0288
+# define RADEON_FP2_BLANK_EN (1 << 1)
+# define RADEON_FP2_ON (1 << 2)
+# define RADEON_FP2_PANEL_FORMAT (1 << 3)
+# define RADEON_FP2_SOURCE_SEL_MASK (3 << 10)
+# define RADEON_FP2_SOURCE_SEL_CRTC2 (1 << 10)
+# define RADEON_FP2_SRC_SEL_MASK (3 << 13)
+# define RADEON_FP2_SRC_SEL_CRTC2 (1 << 13)
+# define RADEON_FP2_FP_POL (1 << 16)
+# define RADEON_FP2_LP_POL (1 << 17)
+# define RADEON_FP2_SCK_POL (1 << 18)
+# define RADEON_FP2_LCD_CNTL_MASK (7 << 19)
+# define RADEON_FP2_PAD_FLOP_EN (1 << 22)
+# define RADEON_FP2_CRC_EN (1 << 23)
+# define RADEON_FP2_CRC_READ_EN (1 << 24)
+# define RADEON_FP2_DV0_EN (1 << 25)
+# define RADEON_FP2_DV0_RATE_SEL_SDR (1 << 26)
+#define RADEON_FP_H_SYNC_STRT_WID 0x02c4
+#define RADEON_FP_H2_SYNC_STRT_WID 0x03c4
+#define RADEON_FP_HORZ_STRETCH 0x028c
+#define RADEON_FP_HORZ2_STRETCH 0x038c
+# define RADEON_HORZ_STRETCH_RATIO_MASK 0xffff
+# define RADEON_HORZ_STRETCH_RATIO_MAX 4096
+# define RADEON_HORZ_PANEL_SIZE (0x1ff << 16)
+# define RADEON_HORZ_PANEL_SHIFT 16
+# define RADEON_HORZ_STRETCH_PIXREP (0 << 25)
+# define RADEON_HORZ_STRETCH_BLEND (1 << 26)
+# define RADEON_HORZ_STRETCH_ENABLE (1 << 25)
+# define RADEON_HORZ_AUTO_RATIO (1 << 27)
+# define RADEON_HORZ_FP_LOOP_STRETCH (0x7 << 28)
+# define RADEON_HORZ_AUTO_RATIO_INC (1 << 31)
+#define RADEON_FP_V_SYNC_STRT_WID 0x02c8
+#define RADEON_FP_VERT_STRETCH 0x0290
+#define RADEON_FP_V2_SYNC_STRT_WID 0x03c8
+#define RADEON_FP_VERT2_STRETCH 0x0390
+# define RADEON_VERT_PANEL_SIZE (0xfff << 12)
+# define RADEON_VERT_PANEL_SHIFT 12
+# define RADEON_VERT_STRETCH_RATIO_MASK 0xfff
+# define RADEON_VERT_STRETCH_RATIO_SHIFT 0
+# define RADEON_VERT_STRETCH_RATIO_MAX 4096
+# define RADEON_VERT_STRETCH_ENABLE (1 << 25)
+# define RADEON_VERT_STRETCH_LINEREP (0 << 26)
+# define RADEON_VERT_STRETCH_BLEND (1 << 26)
+# define RADEON_VERT_AUTO_RATIO_EN (1 << 27)
+# define RADEON_VERT_STRETCH_RESERVED 0xf1000000
+
+#define RADEON_GEN_INT_CNTL 0x0040
+#define RADEON_GEN_INT_STATUS 0x0044
+# define RADEON_VSYNC_INT_AK (1 << 2)
+# define RADEON_VSYNC_INT (1 << 2)
+# define RADEON_VSYNC2_INT_AK (1 << 6)
+# define RADEON_VSYNC2_INT (1 << 6)
+#define RADEON_GENENB 0x03c3 /* VGA */
+#define RADEON_GENFC_RD 0x03ca /* VGA */
+#define RADEON_GENFC_WT 0x03da /* VGA, 0x03ba */
+#define RADEON_GENMO_RD 0x03cc /* VGA */
+#define RADEON_GENMO_WT 0x03c2 /* VGA */
+#define RADEON_GENS0 0x03c2 /* VGA */
+#define RADEON_GENS1 0x03da /* VGA, 0x03ba */
+#define RADEON_GPIO_MONID 0x0068 /* DDC interface via I2C */
+#define RADEON_GPIO_MONIDB 0x006c
+#define RADEON_GPIO_CRT2_DDC 0x006c
+#define RADEON_GPIO_DVI_DDC 0x0064
+#define RADEON_GPIO_VGA_DDC 0x0060
+# define RADEON_GPIO_A_0 (1 << 0)
+# define RADEON_GPIO_A_1 (1 << 1)
+# define RADEON_GPIO_Y_0 (1 << 8)
+# define RADEON_GPIO_Y_1 (1 << 9)
+# define RADEON_GPIO_Y_SHIFT_0 8
+# define RADEON_GPIO_Y_SHIFT_1 9
+# define RADEON_GPIO_EN_0 (1 << 16)
+# define RADEON_GPIO_EN_1 (1 << 17)
+# define RADEON_GPIO_MASK_0 (1 << 24) /*??*/
+# define RADEON_GPIO_MASK_1 (1 << 25) /*??*/
+#define RADEON_GRPH8_DATA 0x03cf /* VGA */
+#define RADEON_GRPH8_IDX 0x03ce /* VGA */
+#define RADEON_GUI_SCRATCH_REG0 0x15e0
+#define RADEON_GUI_SCRATCH_REG1 0x15e4
+#define RADEON_GUI_SCRATCH_REG2 0x15e8
+#define RADEON_GUI_SCRATCH_REG3 0x15ec
+#define RADEON_GUI_SCRATCH_REG4 0x15f0
+#define RADEON_GUI_SCRATCH_REG5 0x15f4
+
+#define RADEON_HEADER 0x0f0e /* PCI */
+#define RADEON_HOST_DATA0 0x17c0
+#define RADEON_HOST_DATA1 0x17c4
+#define RADEON_HOST_DATA2 0x17c8
+#define RADEON_HOST_DATA3 0x17cc
+#define RADEON_HOST_DATA4 0x17d0
+#define RADEON_HOST_DATA5 0x17d4
+#define RADEON_HOST_DATA6 0x17d8
+#define RADEON_HOST_DATA7 0x17dc
+#define RADEON_HOST_DATA_LAST 0x17e0
+#define RADEON_HOST_PATH_CNTL 0x0130
+# define RADEON_HDP_SOFT_RESET (1 << 26)
+#define RADEON_HTOTAL_CNTL 0x0009 /* PLL */
+#define RADEON_HTOTAL2_CNTL 0x002e /* PLL */
+
+#define RADEON_I2C_CNTL_1 0x0094 /* ? */
+#define RADEON_DVI_I2C_CNTL_1 0x02e4 /* ? */
+#define RADEON_INTERRUPT_LINE 0x0f3c /* PCI */
+#define RADEON_INTERRUPT_PIN 0x0f3d /* PCI */
+#define RADEON_IO_BASE 0x0f14 /* PCI */
+
+#define RADEON_LATENCY 0x0f0d /* PCI */
+#define RADEON_LEAD_BRES_DEC 0x1608
+#define RADEON_LEAD_BRES_LNTH 0x161c
+#define RADEON_LEAD_BRES_LNTH_SUB 0x1624
+#define RADEON_LVDS_GEN_CNTL 0x02d0
+# define RADEON_LVDS_ON (1 << 0)
+# define RADEON_LVDS_DISPLAY_DIS (1 << 1)
+# define RADEON_LVDS_PANEL_TYPE (1 << 2)
+# define RADEON_LVDS_PANEL_FORMAT (1 << 3)
+# define RADEON_LVDS_EN (1 << 7)
+# define RADEON_LVDS_DIGON (1 << 18)
+# define RADEON_LVDS_BLON (1 << 19)
+# define RADEON_LVDS_SEL_CRTC2 (1 << 23)
+#define RADEON_LVDS_PLL_CNTL 0x02d4
+# define RADEON_HSYNC_DELAY_SHIFT 28
+# define RADEON_HSYNC_DELAY_MASK (0xf << 28)
+
+#define RADEON_MAX_LATENCY 0x0f3f /* PCI */
+#define RADEON_MC_AGP_LOCATION 0x014c
+#define RADEON_MC_FB_LOCATION 0x0148
+#define RADEON_DISPLAY_BASE_ADDR 0x23c
+#define RADEON_DISPLAY2_BASE_ADDR 0x33c
+#define RADEON_OV0_BASE_ADDR 0x43c
+#define RADEON_NB_TOM 0x15c
+#define RADEON_MCLK_CNTL 0x0012 /* PLL */
+# define RADEON_FORCEON_MCLKA (1 << 16)
+# define RADEON_FORCEON_MCLKB (1 << 17)
+# define RADEON_FORCEON_YCLKA (1 << 18)
+# define RADEON_FORCEON_YCLKB (1 << 19)
+# define RADEON_FORCEON_MC (1 << 20)
+# define RADEON_FORCEON_AIC (1 << 21)
+#define RADEON_MDGPIO_A_REG 0x01ac
+#define RADEON_MDGPIO_EN_REG 0x01b0
+#define RADEON_MDGPIO_MASK 0x0198
+#define RADEON_MDGPIO_Y_REG 0x01b4
+#define RADEON_MEM_ADDR_CONFIG 0x0148
+#define RADEON_MEM_BASE 0x0f10 /* PCI */
+#define RADEON_MEM_CNTL 0x0140
+# define RADEON_MEM_NUM_CHANNELS_MASK 0x01
+# define RADEON_MEM_USE_B_CH_ONLY (1<<1)
+# define RV100_HALF_MODE (1<<3)
+#define RADEON_MEM_TIMING_CNTL 0x0144 /* EXT_MEM_CNTL */
+#define RADEON_MEM_INIT_LAT_TIMER 0x0154
+#define RADEON_MEM_INTF_CNTL 0x014c
+#define RADEON_MEM_SDRAM_MODE_REG 0x0158
+#define RADEON_MEM_STR_CNTL 0x0150
+#define RADEON_MEM_VGA_RP_SEL 0x003c
+#define RADEON_MEM_VGA_WP_SEL 0x0038
+#define RADEON_MIN_GRANT 0x0f3e /* PCI */
+#define RADEON_MM_DATA 0x0004
+#define RADEON_MM_INDEX 0x0000
+#define RADEON_MPLL_CNTL 0x000e /* PLL */
+#define RADEON_MPP_TB_CONFIG 0x01c0 /* ? */
+#define RADEON_MPP_GP_CONFIG 0x01c8 /* ? */
+
+#define RADEON_N_VIF_COUNT 0x0248
+
+#define RADEON_OV0_AUTO_FLIP_CNTL 0x0470
+#define RADEON_OV0_COLOUR_CNTL 0x04E0
+#define RADEON_OV0_DEINTERLACE_PATTERN 0x0474
+#define RADEON_OV0_EXCLUSIVE_HORZ 0x0408
+# define RADEON_EXCL_HORZ_START_MASK 0x000000ff
+# define RADEON_EXCL_HORZ_END_MASK 0x0000ff00
+# define RADEON_EXCL_HORZ_BACK_PORCH_MASK 0x00ff0000
+# define RADEON_EXCL_HORZ_EXCLUSIVE_EN 0x80000000
+#define RADEON_OV0_EXCLUSIVE_VERT 0x040C
+# define RADEON_EXCL_VERT_START_MASK 0x000003ff
+# define RADEON_EXCL_VERT_END_MASK 0x03ff0000
+#define RADEON_OV0_FILTER_CNTL 0x04A0
+#define RADEON_OV0_FOUR_TAP_COEF_0 0x04B0
+#define RADEON_OV0_FOUR_TAP_COEF_1 0x04B4
+#define RADEON_OV0_FOUR_TAP_COEF_2 0x04B8
+#define RADEON_OV0_FOUR_TAP_COEF_3 0x04BC
+#define RADEON_OV0_FOUR_TAP_COEF_4 0x04C0
+#define RADEON_OV0_GAMMA_000_00F 0x0d40
+#define RADEON_OV0_GAMMA_010_01F 0x0d44
+#define RADEON_OV0_GAMMA_020_03F 0x0d48
+#define RADEON_OV0_GAMMA_040_07F 0x0d4c
+#define RADEON_OV0_GAMMA_080_0BF 0x0e00
+#define RADEON_OV0_GAMMA_0C0_0FF 0x0e04
+#define RADEON_OV0_GAMMA_100_13F 0x0e08
+#define RADEON_OV0_GAMMA_140_17F 0x0e0c
+#define RADEON_OV0_GAMMA_180_1BF 0x0e10
+#define RADEON_OV0_GAMMA_1C0_1FF 0x0e14
+#define RADEON_OV0_GAMMA_200_23F 0x0e18
+#define RADEON_OV0_GAMMA_240_27F 0x0e1c
+#define RADEON_OV0_GAMMA_280_2BF 0x0e20
+#define RADEON_OV0_GAMMA_2C0_2FF 0x0e24
+#define RADEON_OV0_GAMMA_300_33F 0x0e28
+#define RADEON_OV0_GAMMA_340_37F 0x0e2c
+#define RADEON_OV0_GAMMA_380_3BF 0x0d50
+#define RADEON_OV0_GAMMA_3C0_3FF 0x0d54
+#define RADEON_OV0_GRAPHICS_KEY_CLR_LOW 0x04EC
+#define RADEON_OV0_GRAPHICS_KEY_CLR_HIGH 0x04F0
+#define RADEON_OV0_H_INC 0x0480
+#define RADEON_OV0_KEY_CNTL 0x04F4
+# define RADEON_VIDEO_KEY_FN_MASK 0x00000003L
+# define RADEON_VIDEO_KEY_FN_FALSE 0x00000000L
+# define RADEON_VIDEO_KEY_FN_TRUE 0x00000001L
+# define RADEON_VIDEO_KEY_FN_EQ 0x00000002L
+# define RADEON_VIDEO_KEY_FN_NE 0x00000003L
+# define RADEON_GRAPHIC_KEY_FN_MASK 0x00000030L
+# define RADEON_GRAPHIC_KEY_FN_FALSE 0x00000000L
+# define RADEON_GRAPHIC_KEY_FN_TRUE 0x00000010L
+# define RADEON_GRAPHIC_KEY_FN_EQ 0x00000020L
+# define RADEON_GRAPHIC_KEY_FN_NE 0x00000030L
+# define RADEON_CMP_MIX_MASK 0x00000100L
+# define RADEON_CMP_MIX_OR 0x00000000L
+# define RADEON_CMP_MIX_AND 0x00000100L
+#define RADEON_OV0_LIN_TRANS_A 0x0d20
+#define RADEON_OV0_LIN_TRANS_B 0x0d24
+#define RADEON_OV0_LIN_TRANS_C 0x0d28
+#define RADEON_OV0_LIN_TRANS_D 0x0d2c
+#define RADEON_OV0_LIN_TRANS_E 0x0d30
+#define RADEON_OV0_LIN_TRANS_F 0x0d34
+#define RADEON_OV0_P1_BLANK_LINES_AT_TOP 0x0430
+# define RADEON_P1_BLNK_LN_AT_TOP_M1_MASK 0x00000fffL
+# define RADEON_P1_ACTIVE_LINES_M1 0x0fff0000L
+#define RADEON_OV0_P1_H_ACCUM_INIT 0x0488
+#define RADEON_OV0_P1_V_ACCUM_INIT 0x0428
+# define RADEON_OV0_P1_MAX_LN_IN_PER_LN_OUT 0x00000003L
+# define RADEON_OV0_P1_V_ACCUM_INIT_MASK 0x01ff8000L
+#define RADEON_OV0_P1_X_START_END 0x0494
+#define RADEON_OV0_P2_X_START_END 0x0498
+#define RADEON_OV0_P23_BLANK_LINES_AT_TOP 0x0434
+# define RADEON_P23_BLNK_LN_AT_TOP_M1_MASK 0x000007ffL
+# define RADEON_P23_ACTIVE_LINES_M1 0x07ff0000L
+#define RADEON_OV0_P23_H_ACCUM_INIT 0x048C
+#define RADEON_OV0_P23_V_ACCUM_INIT 0x042C
+#define RADEON_OV0_P3_X_START_END 0x049C
+#define RADEON_OV0_REG_LOAD_CNTL 0x0410
+# define RADEON_REG_LD_CTL_LOCK 0x00000001L
+# define RADEON_REG_LD_CTL_VBLANK_DURING_LOCK 0x00000002L
+# define RADEON_REG_LD_CTL_STALL_GUI_UNTIL_FLIP 0x00000004L
+# define RADEON_REG_LD_CTL_LOCK_READBACK 0x00000008L
+#define RADEON_OV0_SCALE_CNTL 0x0420
+# define RADEON_SCALER_HORZ_PICK_NEAREST 0x00000004L
+# define RADEON_SCALER_VERT_PICK_NEAREST 0x00000008L
+# define RADEON_SCALER_SIGNED_UV 0x00000010L
+# define RADEON_SCALER_GAMMA_SEL_MASK 0x00000060L
+# define RADEON_SCALER_GAMMA_SEL_BRIGHT 0x00000000L
+# define RADEON_SCALER_GAMMA_SEL_G22 0x00000020L
+# define RADEON_SCALER_GAMMA_SEL_G18 0x00000040L
+# define RADEON_SCALER_GAMMA_SEL_G14 0x00000060L
+# define RADEON_SCALER_COMCORE_SHIFT_UP_ONE 0x00000080L
+# define RADEON_SCALER_SURFAC_FORMAT 0x00000f00L
+# define RADEON_SCALER_SOURCE_15BPP 0x00000300L
+# define RADEON_SCALER_SOURCE_16BPP 0x00000400L
+# define RADEON_SCALER_SOURCE_32BPP 0x00000600L
+# define RADEON_SCALER_SOURCE_YUV9 0x00000900L
+# define RADEON_SCALER_SOURCE_YUV12 0x00000A00L
+# define RADEON_SCALER_SOURCE_VYUY422 0x00000B00L
+# define RADEON_SCALER_SOURCE_YVYU422 0x00000C00L
+# define RADEON_SCALER_ADAPTIVE_DEINT 0x00001000L
+# define RADEON_SCALER_TEMPORAL_DEINT 0x00002000L
+# define RADEON_SCALER_SMART_SWITCH 0x00008000L
+# define RADEON_SCALER_BURST_PER_PLANE 0x007F0000L
+# define RADEON_SCALER_DOUBLE_BUFFER 0x01000000L
+# define RADEON_SCALER_DIS_LIMIT 0x08000000L
+# define RADEON_SCALER_INT_EMU 0x20000000L
+# define RADEON_SCALER_ENABLE 0x40000000L
+# define RADEON_SCALER_SOFT_RESET 0x80000000L
+# define RADEON_SCALER_ADAPTIVE_DEINT 0x00001000L
+#define RADEON_OV0_STEP_BY 0x0484
+#define RADEON_OV0_TEST 0x04F8
+#define RADEON_OV0_V_INC 0x0424
+#define RADEON_OV0_VID_BUF_PITCH0_VALUE 0x0460
+#define RADEON_OV0_VID_BUF_PITCH1_VALUE 0x0464
+#define RADEON_OV0_VID_BUF0_BASE_ADRS 0x0440
+# define RADEON_VIF_BUF0_PITCH_SEL 0x00000001L
+# define RADEON_VIF_BUF0_TILE_ADRS 0x00000002L
+# define RADEON_VIF_BUF0_BASE_ADRS_MASK 0x03fffff0L
+# define RADEON_VIF_BUF0_1ST_LINE_LSBS_MASK 0x48000000L
+#define RADEON_OV0_VID_BUF1_BASE_ADRS 0x0444
+# define RADEON_VIF_BUF1_PITCH_SEL 0x00000001L
+# define RADEON_VIF_BUF1_TILE_ADRS 0x00000002L
+# define RADEON_VIF_BUF1_BASE_ADRS_MASK 0x03fffff0L
+# define RADEON_VIF_BUF1_1ST_LINE_LSBS_MASK 0x48000000L
+#define RADEON_OV0_VID_BUF2_BASE_ADRS 0x0448
+# define RADEON_VIF_BUF2_PITCH_SEL 0x00000001L
+# define RADEON_VIF_BUF2_TILE_ADRS 0x00000002L
+# define RADEON_VIF_BUF2_BASE_ADRS_MASK 0x03fffff0L
+# define RADEON_VIF_BUF2_1ST_LINE_LSBS_MASK 0x48000000L
+#define RADEON_OV0_VID_BUF3_BASE_ADRS 0x044C
+#define RADEON_OV0_VID_BUF4_BASE_ADRS 0x0450
+#define RADEON_OV0_VID_BUF5_BASE_ADRS 0x0454
+#define RADEON_OV0_VIDEO_KEY_CLR_HIGH 0x04E8
+#define RADEON_OV0_VIDEO_KEY_CLR_LOW 0x04E4
+#define RADEON_OV0_Y_X_START 0x0400
+#define RADEON_OV0_Y_X_END 0x0404
+#define RADEON_OV1_Y_X_START 0x0600
+#define RADEON_OV1_Y_X_END 0x0604
+#define RADEON_OVR_CLR 0x0230
+#define RADEON_OVR_WID_LEFT_RIGHT 0x0234
+#define RADEON_OVR_WID_TOP_BOTTOM 0x0238
+
+#define RADEON_P2PLL_CNTL 0x002a /* P2PLL */
+# define RADEON_P2PLL_RESET (1 << 0)
+# define RADEON_P2PLL_SLEEP (1 << 1)
+# define RADEON_P2PLL_ATOMIC_UPDATE_EN (1 << 16)
+# define RADEON_P2PLL_VGA_ATOMIC_UPDATE_EN (1 << 17)
+# define RADEON_P2PLL_ATOMIC_UPDATE_VSYNC (1 << 18)
+#define RADEON_P2PLL_DIV_0 0x002c
+# define RADEON_P2PLL_FB0_DIV_MASK 0x07ff
+# define RADEON_P2PLL_POST0_DIV_MASK 0x00070000
+#define RADEON_P2PLL_REF_DIV 0x002B /* PLL */
+# define RADEON_P2PLL_REF_DIV_MASK 0x03ff
+# define RADEON_P2PLL_ATOMIC_UPDATE_R (1 << 15) /* same as _W */
+# define RADEON_P2PLL_ATOMIC_UPDATE_W (1 << 15) /* same as _R */
+#define RADEON_PALETTE_DATA 0x00b4
+#define RADEON_PALETTE_30_DATA 0x00b8
+#define RADEON_PALETTE_INDEX 0x00b0
+#define RADEON_PCI_GART_PAGE 0x017c
+#define RADEON_PIXCLKS_CNTL 0x002d
+# define RADEON_PIX2CLK_SRC_SEL_MASK 0x03
+# define RADEON_PIX2CLK_SRC_SEL_CPUCLK 0x00
+# define RADEON_PIX2CLK_SRC_SEL_PSCANCLK 0x01
+# define RADEON_PIX2CLK_SRC_SEL_BYTECLK 0x02
+# define RADEON_PIX2CLK_SRC_SEL_P2PLLCLK 0x03
+# define RADEON_PIX2CLK_ALWAYS_ONb (1<<6)
+# define RADEON_PIX2CLK_DAC_ALWAYS_ONb (1<<7)
+# define RADEON_PIXCLK_TV_SRC_SEL (1 << 8)
+# define RADEON_PIXCLK_LVDS_ALWAYS_ONb (1 << 14)
+# define RADEON_PIXCLK_TMDS_ALWAYS_ONb (1 << 15)
+#define RADEON_PLANE_3D_MASK_C 0x1d44
+#define RADEON_PLL_TEST_CNTL 0x0013 /* PLL */
+#define RADEON_PMI_CAP_ID 0x0f5c /* PCI */
+#define RADEON_PMI_DATA 0x0f63 /* PCI */
+#define RADEON_PMI_NXT_CAP_PTR 0x0f5d /* PCI */
+#define RADEON_PMI_PMC_REG 0x0f5e /* PCI */
+#define RADEON_PMI_PMCSR_REG 0x0f60 /* PCI */
+#define RADEON_PMI_REGISTER 0x0f5c /* PCI */
+#define RADEON_PPLL_CNTL 0x0002 /* PLL */
+# define RADEON_PPLL_RESET (1 << 0)
+# define RADEON_PPLL_SLEEP (1 << 1)
+# define RADEON_PPLL_ATOMIC_UPDATE_EN (1 << 16)
+# define RADEON_PPLL_VGA_ATOMIC_UPDATE_EN (1 << 17)
+# define RADEON_PPLL_ATOMIC_UPDATE_VSYNC (1 << 18)
+#define RADEON_PPLL_DIV_0 0x0004 /* PLL */
+#define RADEON_PPLL_DIV_1 0x0005 /* PLL */
+#define RADEON_PPLL_DIV_2 0x0006 /* PLL */
+#define RADEON_PPLL_DIV_3 0x0007 /* PLL */
+# define RADEON_PPLL_FB3_DIV_MASK 0x07ff
+# define RADEON_PPLL_POST3_DIV_MASK 0x00070000
+#define RADEON_PPLL_REF_DIV 0x0003 /* PLL */
+# define RADEON_PPLL_REF_DIV_MASK 0x03ff
+# define RADEON_PPLL_ATOMIC_UPDATE_R (1 << 15) /* same as _W */
+# define RADEON_PPLL_ATOMIC_UPDATE_W (1 << 15) /* same as _R */
+#define RADEON_PWR_MNGMT_CNTL_STATUS 0x0f60 /* PCI */
+
+#define RADEON_RBBM_GUICNTL 0x172c
+# define RADEON_HOST_DATA_SWAP_NONE (0 << 0)
+# define RADEON_HOST_DATA_SWAP_16BIT (1 << 0)
+# define RADEON_HOST_DATA_SWAP_32BIT (2 << 0)
+# define RADEON_HOST_DATA_SWAP_HDW (3 << 0)
+#define RADEON_RBBM_SOFT_RESET 0x00f0
+# define RADEON_SOFT_RESET_CP (1 << 0)
+# define RADEON_SOFT_RESET_HI (1 << 1)
+# define RADEON_SOFT_RESET_SE (1 << 2)
+# define RADEON_SOFT_RESET_RE (1 << 3)
+# define RADEON_SOFT_RESET_PP (1 << 4)
+# define RADEON_SOFT_RESET_E2 (1 << 5)
+# define RADEON_SOFT_RESET_RB (1 << 6)
+# define RADEON_SOFT_RESET_HDP (1 << 7)
+#define RADEON_RBBM_STATUS 0x0e40
+# define RADEON_RBBM_FIFOCNT_MASK 0x007f
+# define RADEON_RBBM_ACTIVE (1 << 31)
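+/* Illustrative only (not part of the original header): 2D code typically
+ * spins on RBBM_STATUS until the engine reports idle:
+ *
+ *   while (INREG(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
+ *       ;
+ */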
+#define RADEON_RB2D_DSTCACHE_CTLSTAT 0x342c
+# define RADEON_RB2D_DC_FLUSH (3 << 0)
+# define RADEON_RB2D_DC_FREE (3 << 2)
+# define RADEON_RB2D_DC_FLUSH_ALL 0xf
+# define RADEON_RB2D_DC_BUSY (1 << 31)
+#define RADEON_RB2D_DSTCACHE_MODE 0x3428
+#define RADEON_REG_BASE 0x0f18 /* PCI */
+#define RADEON_REGPROG_INF 0x0f09 /* PCI */
+#define RADEON_REVISION_ID 0x0f08 /* PCI */
+
+#define RADEON_SC_BOTTOM 0x164c
+#define RADEON_SC_BOTTOM_RIGHT 0x16f0
+#define RADEON_SC_BOTTOM_RIGHT_C 0x1c8c
+#define RADEON_SC_LEFT 0x1640
+#define RADEON_SC_RIGHT 0x1644
+#define RADEON_SC_TOP 0x1648
+#define RADEON_SC_TOP_LEFT 0x16ec
+#define RADEON_SC_TOP_LEFT_C 0x1c88
+# define RADEON_SC_SIGN_MASK_LO 0x8000
+# define RADEON_SC_SIGN_MASK_HI 0x80000000
+#define RADEON_SCLK_CNTL 0x000d /* PLL */
+# define RADEON_DYN_STOP_LAT_MASK 0x00007ff8
+# define RADEON_CP_MAX_DYN_STOP_LAT 0x0008
+# define RADEON_SCLK_FORCEON_MASK 0xffff8000
+#define RADEON_SCLK_MORE_CNTL 0x0035 /* PLL */
+# define RADEON_SCLK_MORE_FORCEON 0x0700
+#define RADEON_SDRAM_MODE_REG 0x0158
+#define RADEON_SEQ8_DATA 0x03c5 /* VGA */
+#define RADEON_SEQ8_IDX 0x03c4 /* VGA */
+#define RADEON_SNAPSHOT_F_COUNT 0x0244
+#define RADEON_SNAPSHOT_VH_COUNTS 0x0240
+#define RADEON_SNAPSHOT_VIF_COUNT 0x024c
+#define RADEON_SRC_OFFSET 0x15ac
+#define RADEON_SRC_PITCH 0x15b0
+#define RADEON_SRC_PITCH_OFFSET 0x1428
+#define RADEON_SRC_SC_BOTTOM 0x165c
+#define RADEON_SRC_SC_BOTTOM_RIGHT 0x16f4
+#define RADEON_SRC_SC_RIGHT 0x1654
+#define RADEON_SRC_X 0x1414
+#define RADEON_SRC_X_Y 0x1590
+#define RADEON_SRC_Y 0x1418
+#define RADEON_SRC_Y_X 0x1434
+#define RADEON_STATUS 0x0f06 /* PCI */
+#define RADEON_SUBPIC_CNTL 0x0540 /* ? */
+#define RADEON_SUB_CLASS 0x0f0a /* PCI */
+#define RADEON_SURFACE_CNTL 0x0b00
+# define RADEON_SURF_TRANSLATION_DIS (1 << 8)
+# define RADEON_NONSURF_AP0_SWP_16BPP (1 << 20)
+# define RADEON_NONSURF_AP0_SWP_32BPP (1 << 21)
+#define RADEON_SURFACE0_INFO 0x0b0c
+# define RADEON_SURF_TILE_COLOR_MACRO (0 << 16)
+# define RADEON_SURF_TILE_COLOR_BOTH (1 << 16)
+# define RADEON_SURF_TILE_DEPTH_32BPP (2 << 16)
+# define RADEON_SURF_TILE_DEPTH_16BPP (3 << 16)
+# define R200_SURF_TILE_NONE (0 << 16)
+# define R200_SURF_TILE_COLOR_MACRO (1 << 16)
+# define R200_SURF_TILE_COLOR_MICRO (2 << 16)
+# define R200_SURF_TILE_COLOR_BOTH (3 << 16)
+# define R200_SURF_TILE_DEPTH_32BPP (4 << 16)
+# define R200_SURF_TILE_DEPTH_16BPP (5 << 16)
+# define RADEON_SURF_AP0_SWP_16BPP (1 << 20)
+# define RADEON_SURF_AP0_SWP_32BPP (1 << 21)
+# define RADEON_SURF_AP1_SWP_16BPP (1 << 22)
+# define RADEON_SURF_AP1_SWP_32BPP (1 << 23)
+#define RADEON_SURFACE0_LOWER_BOUND 0x0b04
+#define RADEON_SURFACE0_UPPER_BOUND 0x0b08
+#define RADEON_SURFACE1_INFO 0x0b1c
+#define RADEON_SURFACE1_LOWER_BOUND 0x0b14
+#define RADEON_SURFACE1_UPPER_BOUND 0x0b18
+#define RADEON_SURFACE2_INFO 0x0b2c
+#define RADEON_SURFACE2_LOWER_BOUND 0x0b24
+#define RADEON_SURFACE2_UPPER_BOUND 0x0b28
+#define RADEON_SURFACE3_INFO 0x0b3c
+#define RADEON_SURFACE3_LOWER_BOUND 0x0b34
+#define RADEON_SURFACE3_UPPER_BOUND 0x0b38
+#define RADEON_SURFACE4_INFO 0x0b4c
+#define RADEON_SURFACE4_LOWER_BOUND 0x0b44
+#define RADEON_SURFACE4_UPPER_BOUND 0x0b48
+#define RADEON_SURFACE5_INFO 0x0b5c
+#define RADEON_SURFACE5_LOWER_BOUND 0x0b54
+#define RADEON_SURFACE5_UPPER_BOUND 0x0b58
+#define RADEON_SURFACE6_INFO 0x0b6c
+#define RADEON_SURFACE6_LOWER_BOUND 0x0b64
+#define RADEON_SURFACE6_UPPER_BOUND 0x0b68
+#define RADEON_SURFACE7_INFO 0x0b7c
+#define RADEON_SURFACE7_LOWER_BOUND 0x0b74
+#define RADEON_SURFACE7_UPPER_BOUND 0x0b78
+#define RADEON_SW_SEMAPHORE 0x013c
+
+#define RADEON_TEST_DEBUG_CNTL 0x0120
+#define RADEON_TEST_DEBUG_MUX 0x0124
+#define RADEON_TEST_DEBUG_OUT 0x012c
+#define RADEON_TMDS_PLL_CNTL 0x02a8
+#define RADEON_TMDS_TRANSMITTER_CNTL 0x02a4
+# define RADEON_TMDS_TRANSMITTER_PLLEN 1
+# define RADEON_TMDS_TRANSMITTER_PLLRST 2
+#define RADEON_TRAIL_BRES_DEC 0x1614
+#define RADEON_TRAIL_BRES_ERR 0x160c
+#define RADEON_TRAIL_BRES_INC 0x1610
+#define RADEON_TRAIL_X 0x1618
+#define RADEON_TRAIL_X_SUB 0x1620
+
+#define RADEON_VCLK_ECP_CNTL 0x0008 /* PLL */
+# define RADEON_VCLK_SRC_SEL_MASK 0x03
+# define RADEON_VCLK_SRC_SEL_CPUCLK 0x00
+# define RADEON_VCLK_SRC_SEL_PSCANCLK 0x01
+# define RADEON_VCLK_SRC_SEL_BYTECLK 0x02
+# define RADEON_VCLK_SRC_SEL_PPLLCLK 0x03
+# define RADEON_PIXCLK_ALWAYS_ONb (1<<6)
+# define RADEON_PIXCLK_DAC_ALWAYS_ONb (1<<7)
+
+#define RADEON_VENDOR_ID 0x0f00 /* PCI */
+#define RADEON_VGA_DDA_CONFIG 0x02e8
+#define RADEON_VGA_DDA_ON_OFF 0x02ec
+#define RADEON_VID_BUFFER_CONTROL 0x0900
+#define RADEON_VIDEOMUX_CNTL 0x0190
+#define RADEON_VIPH_CONTROL 0x0c40 /* ? */
+
+#define RADEON_WAIT_UNTIL 0x1720
+# define RADEON_WAIT_CRTC_PFLIP (1 << 0)
+# define RADEON_WAIT_2D_IDLECLEAN (1 << 16)
+# define RADEON_WAIT_3D_IDLECLEAN (1 << 17)
+# define RADEON_WAIT_HOST_IDLECLEAN (1 << 18)
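+/* Illustrative only (not part of the original header): flush outstanding
+ * 2D and 3D work before the CPU touches the framebuffer:
+ *
+ *   OUTREG(RADEON_WAIT_UNTIL,
+ *          RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
+ */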
+
+#define RADEON_X_MPLL_REF_FB_DIV 0x000a /* PLL */
+#define RADEON_XCLK_CNTL 0x000d /* PLL */
+#define RADEON_XDLL_CNTL 0x000c /* PLL */
+#define RADEON_XPLL_CNTL 0x000b /* PLL */
+
+
+
+ /* Registers for 3D/TCL */
+#define RADEON_PP_BORDER_COLOR_0 0x1d40
+#define RADEON_PP_BORDER_COLOR_1 0x1d44
+#define RADEON_PP_BORDER_COLOR_2 0x1d48
+#define RADEON_PP_CNTL 0x1c38
+# define RADEON_STIPPLE_ENABLE (1 << 0)
+# define RADEON_SCISSOR_ENABLE (1 << 1)
+# define RADEON_PATTERN_ENABLE (1 << 2)
+# define RADEON_SHADOW_ENABLE (1 << 3)
+# define RADEON_TEX_ENABLE_MASK (0xf << 4)
+# define RADEON_TEX_0_ENABLE (1 << 4)
+# define RADEON_TEX_1_ENABLE (1 << 5)
+# define RADEON_TEX_2_ENABLE (1 << 6)
+# define RADEON_TEX_3_ENABLE (1 << 7)
+# define RADEON_TEX_BLEND_ENABLE_MASK (0xf << 12)
+# define RADEON_TEX_BLEND_0_ENABLE (1 << 12)
+# define RADEON_TEX_BLEND_1_ENABLE (1 << 13)
+# define RADEON_TEX_BLEND_2_ENABLE (1 << 14)
+# define RADEON_TEX_BLEND_3_ENABLE (1 << 15)
+# define RADEON_PLANAR_YUV_ENABLE (1 << 20)
+# define RADEON_SPECULAR_ENABLE (1 << 21)
+# define RADEON_FOG_ENABLE (1 << 22)
+# define RADEON_ALPHA_TEST_ENABLE (1 << 23)
+# define RADEON_ANTI_ALIAS_NONE (0 << 24)
+# define RADEON_ANTI_ALIAS_LINE (1 << 24)
+# define RADEON_ANTI_ALIAS_POLY (2 << 24)
+# define RADEON_ANTI_ALIAS_LINE_POLY (3 << 24)
+# define RADEON_BUMP_MAP_ENABLE (1 << 26)
+# define RADEON_BUMPED_MAP_T0 (0 << 27)
+# define RADEON_BUMPED_MAP_T1 (1 << 27)
+# define RADEON_BUMPED_MAP_T2 (2 << 27)
+# define RADEON_TEX_3D_ENABLE_0 (1 << 29)
+# define RADEON_TEX_3D_ENABLE_1 (1 << 30)
+# define RADEON_MC_ENABLE (1 << 31)
+#define RADEON_PP_FOG_COLOR 0x1c18
+# define RADEON_FOG_COLOR_MASK 0x00ffffff
+# define RADEON_FOG_VERTEX (0 << 24)
+# define RADEON_FOG_TABLE (1 << 24)
+# define RADEON_FOG_USE_DEPTH (0 << 25)
+# define RADEON_FOG_USE_DIFFUSE_ALPHA (2 << 25)
+# define RADEON_FOG_USE_SPEC_ALPHA (3 << 25)
+#define RADEON_PP_LUM_MATRIX 0x1d00
+#define RADEON_PP_MISC 0x1c14
+# define RADEON_REF_ALPHA_MASK 0x000000ff
+# define RADEON_ALPHA_TEST_FAIL (0 << 8)
+# define RADEON_ALPHA_TEST_LESS (1 << 8)
+# define RADEON_ALPHA_TEST_LEQUAL (2 << 8)
+# define RADEON_ALPHA_TEST_EQUAL (3 << 8)
+# define RADEON_ALPHA_TEST_GEQUAL (4 << 8)
+# define RADEON_ALPHA_TEST_GREATER (5 << 8)
+# define RADEON_ALPHA_TEST_NEQUAL (6 << 8)
+# define RADEON_ALPHA_TEST_PASS (7 << 8)
+# define RADEON_ALPHA_TEST_OP_MASK (7 << 8)
+# define RADEON_CHROMA_FUNC_FAIL (0 << 16)
+# define RADEON_CHROMA_FUNC_PASS (1 << 16)
+# define RADEON_CHROMA_FUNC_NEQUAL (2 << 16)
+# define RADEON_CHROMA_FUNC_EQUAL (3 << 16)
+# define RADEON_CHROMA_KEY_NEAREST (0 << 18)
+# define RADEON_CHROMA_KEY_ZERO (1 << 18)
+# define RADEON_SHADOW_ID_AUTO_INC (1 << 20)
+# define RADEON_SHADOW_FUNC_EQUAL (0 << 21)
+# define RADEON_SHADOW_FUNC_NEQUAL (1 << 21)
+# define RADEON_SHADOW_PASS_1 (0 << 22)
+# define RADEON_SHADOW_PASS_2 (1 << 22)
+# define RADEON_RIGHT_HAND_CUBE_D3D (0 << 24)
+# define RADEON_RIGHT_HAND_CUBE_OGL (1 << 24)
+#define RADEON_PP_ROT_MATRIX_0 0x1d58
+#define RADEON_PP_ROT_MATRIX_1 0x1d5c
+#define RADEON_PP_TXFILTER_0 0x1c54
+#define RADEON_PP_TXFILTER_1 0x1c6c
+#define RADEON_PP_TXFILTER_2 0x1c84
+# define RADEON_MAG_FILTER_NEAREST (0 << 0)
+# define RADEON_MAG_FILTER_LINEAR (1 << 0)
+# define RADEON_MAG_FILTER_MASK (1 << 0)
+# define RADEON_MIN_FILTER_NEAREST (0 << 1)
+# define RADEON_MIN_FILTER_LINEAR (1 << 1)
+# define RADEON_MIN_FILTER_NEAREST_MIP_NEAREST (2 << 1)
+# define RADEON_MIN_FILTER_NEAREST_MIP_LINEAR (3 << 1)
+# define RADEON_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 1)
+# define RADEON_MIN_FILTER_LINEAR_MIP_LINEAR (7 << 1)
+# define RADEON_MIN_FILTER_ANISO_NEAREST (8 << 1)
+# define RADEON_MIN_FILTER_ANISO_LINEAR (9 << 1)
+# define RADEON_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (10 << 1)
+# define RADEON_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (11 << 1)
+# define RADEON_MIN_FILTER_MASK (15 << 1)
+# define RADEON_MAX_ANISO_1_TO_1 (0 << 5)
+# define RADEON_MAX_ANISO_2_TO_1 (1 << 5)
+# define RADEON_MAX_ANISO_4_TO_1 (2 << 5)
+# define RADEON_MAX_ANISO_8_TO_1 (3 << 5)
+# define RADEON_MAX_ANISO_16_TO_1 (4 << 5)
+# define RADEON_MAX_ANISO_MASK (7 << 5)
+# define RADEON_LOD_BIAS_MASK (0xff << 8)
+# define RADEON_LOD_BIAS_SHIFT 8
+# define RADEON_MAX_MIP_LEVEL_MASK (0x0f << 16)
+# define RADEON_MAX_MIP_LEVEL_SHIFT 16
+# define RADEON_YUV_TO_RGB (1 << 20)
+# define RADEON_YUV_TEMPERATURE_COOL (0 << 21)
+# define RADEON_YUV_TEMPERATURE_HOT (1 << 21)
+# define RADEON_YUV_TEMPERATURE_MASK (1 << 21)
+# define RADEON_WRAPEN_S (1 << 22)
+# define RADEON_CLAMP_S_WRAP (0 << 23)
+# define RADEON_CLAMP_S_MIRROR (1 << 23)
+# define RADEON_CLAMP_S_CLAMP_LAST (2 << 23)
+# define RADEON_CLAMP_S_MIRROR_CLAMP_LAST (3 << 23)
+# define RADEON_CLAMP_S_CLAMP_BORDER (4 << 23)
+# define RADEON_CLAMP_S_MIRROR_CLAMP_BORDER (5 << 23)
+# define RADEON_CLAMP_S_CLAMP_GL (6 << 23)
+# define RADEON_CLAMP_S_MIRROR_CLAMP_GL (7 << 23)
+# define RADEON_CLAMP_S_MASK (7 << 23)
+# define RADEON_WRAPEN_T (1 << 26)
+# define RADEON_CLAMP_T_WRAP (0 << 27)
+# define RADEON_CLAMP_T_MIRROR (1 << 27)
+# define RADEON_CLAMP_T_CLAMP_LAST (2 << 27)
+# define RADEON_CLAMP_T_MIRROR_CLAMP_LAST (3 << 27)
+# define RADEON_CLAMP_T_CLAMP_BORDER (4 << 27)
+# define RADEON_CLAMP_T_MIRROR_CLAMP_BORDER (5 << 27)
+# define RADEON_CLAMP_T_CLAMP_GL (6 << 27)
+# define RADEON_CLAMP_T_MIRROR_CLAMP_GL (7 << 27)
+# define RADEON_CLAMP_T_MASK (7 << 27)
+# define RADEON_BORDER_MODE_OGL (0 << 31)
+# define RADEON_BORDER_MODE_D3D (1 << 31)
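+
+/* Illustrative sketch, not part of the original header: a typical
+ * trilinear PP_TXFILTER word -- GL_LINEAR magnification,
+ * GL_LINEAR_MIPMAP_LINEAR minification, GL_REPEAT wrap on S and T.
+ * The helper name is hypothetical. */
+static inline unsigned radeon_txfilter_trilinear_repeat(void)
+{
+    return RADEON_MAG_FILTER_LINEAR
+         | RADEON_MIN_FILTER_LINEAR_MIP_LINEAR
+         | RADEON_CLAMP_S_WRAP
+         | RADEON_CLAMP_T_WRAP;
+}
+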
+#define RADEON_PP_TXFORMAT_0 0x1c58
+#define RADEON_PP_TXFORMAT_1 0x1c70
+#define RADEON_PP_TXFORMAT_2 0x1c88
+# define RADEON_TXFORMAT_I8 (0 << 0)
+# define RADEON_TXFORMAT_AI88 (1 << 0)
+# define RADEON_TXFORMAT_RGB332 (2 << 0)
+# define RADEON_TXFORMAT_ARGB1555 (3 << 0)
+# define RADEON_TXFORMAT_RGB565 (4 << 0)
+# define RADEON_TXFORMAT_ARGB4444 (5 << 0)
+# define RADEON_TXFORMAT_ARGB8888 (6 << 0)
+# define RADEON_TXFORMAT_RGBA8888 (7 << 0)
+# define RADEON_TXFORMAT_Y8 (8 << 0)
+# define RADEON_TXFORMAT_VYUY422 (10 << 0)
+# define RADEON_TXFORMAT_YVYU422 (11 << 0)
+# define RADEON_TXFORMAT_DXT1 (12 << 0)
+# define RADEON_TXFORMAT_DXT23 (14 << 0)
+# define RADEON_TXFORMAT_DXT45 (15 << 0)
+# define RADEON_TXFORMAT_SHADOW16 (16 << 0)
+# define RADEON_TXFORMAT_SHADOW32 (17 << 0)
+# define RADEON_TXFORMAT_DUDV88 (18 << 0)
+# define RADEON_TXFORMAT_LDUDV655 (19 << 0)
+# define RADEON_TXFORMAT_LDUDUV8888 (20 << 0)
+# define RADEON_TXFORMAT_FORMAT_MASK (31 << 0)
+# define RADEON_TXFORMAT_FORMAT_SHIFT 0
+# define RADEON_TXFORMAT_APPLE_YUV_MODE (1 << 5)
+# define RADEON_TXFORMAT_ALPHA_IN_MAP (1 << 6)
+# define RADEON_TXFORMAT_NON_POWER2 (1 << 7)
+# define RADEON_TXFORMAT_WIDTH_MASK (15 << 8)
+# define RADEON_TXFORMAT_WIDTH_SHIFT 8
+# define RADEON_TXFORMAT_HEIGHT_MASK (15 << 12)
+# define RADEON_TXFORMAT_HEIGHT_SHIFT 12
+# define RADEON_TXFORMAT_F5_WIDTH_MASK (15 << 16)
+# define RADEON_TXFORMAT_F5_WIDTH_SHIFT 16
+# define RADEON_TXFORMAT_F5_HEIGHT_MASK (15 << 20)
+# define RADEON_TXFORMAT_F5_HEIGHT_SHIFT 20
+# define RADEON_TXFORMAT_ST_ROUTE_STQ0 (0 << 24)
+# define RADEON_TXFORMAT_ST_ROUTE_MASK (3 << 24)
+# define RADEON_TXFORMAT_ST_ROUTE_STQ1 (1 << 24)
+# define RADEON_TXFORMAT_ST_ROUTE_STQ2 (2 << 24)
+# define RADEON_TXFORMAT_ENDIAN_NO_SWAP (0 << 26)
+# define RADEON_TXFORMAT_ENDIAN_16BPP_SWAP (1 << 26)
+# define RADEON_TXFORMAT_ENDIAN_32BPP_SWAP (2 << 26)
+# define RADEON_TXFORMAT_ENDIAN_HALFDW_SWAP (3 << 26)
+# define RADEON_TXFORMAT_ALPHA_MASK_ENABLE (1 << 28)
+# define RADEON_TXFORMAT_CHROMA_KEY_ENABLE (1 << 29)
+# define RADEON_TXFORMAT_CUBIC_MAP_ENABLE (1 << 30)
+# define RADEON_TXFORMAT_PERSPECTIVE_ENABLE (1 << 31)
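+
+/* Illustrative sketch, not part of the original header: encoding a
+ * power-of-two ARGB8888 texture into PP_TXFORMAT.  log2_w/log2_h are
+ * the base-2 logs of width and height (e.g. 8 for a 256-texel axis);
+ * the helper name is hypothetical. */
+static inline unsigned radeon_txformat_argb8888(unsigned log2_w,
+                                                unsigned log2_h)
+{
+    return RADEON_TXFORMAT_ARGB8888
+         | ((log2_w << RADEON_TXFORMAT_WIDTH_SHIFT)
+            & RADEON_TXFORMAT_WIDTH_MASK)
+         | ((log2_h << RADEON_TXFORMAT_HEIGHT_SHIFT)
+            & RADEON_TXFORMAT_HEIGHT_MASK);
+}
+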
+#define RADEON_PP_CUBIC_FACES_0 0x1d24
+#define RADEON_PP_CUBIC_FACES_1 0x1d28
+#define RADEON_PP_CUBIC_FACES_2 0x1d2c
+# define RADEON_FACE_WIDTH_1_SHIFT 0
+# define RADEON_FACE_HEIGHT_1_SHIFT 4
+# define RADEON_FACE_WIDTH_1_MASK (0xf << 0)
+# define RADEON_FACE_HEIGHT_1_MASK (0xf << 4)
+# define RADEON_FACE_WIDTH_2_SHIFT 8
+# define RADEON_FACE_HEIGHT_2_SHIFT 12
+# define RADEON_FACE_WIDTH_2_MASK (0xf << 8)
+# define RADEON_FACE_HEIGHT_2_MASK (0xf << 12)
+# define RADEON_FACE_WIDTH_3_SHIFT 16
+# define RADEON_FACE_HEIGHT_3_SHIFT 20
+# define RADEON_FACE_WIDTH_3_MASK (0xf << 16)
+# define RADEON_FACE_HEIGHT_3_MASK (0xf << 20)
+# define RADEON_FACE_WIDTH_4_SHIFT 24
+# define RADEON_FACE_HEIGHT_4_SHIFT 28
+# define RADEON_FACE_WIDTH_4_MASK (0xf << 24)
+# define RADEON_FACE_HEIGHT_4_MASK (0xf << 28)
+
+#define RADEON_PP_TXOFFSET_0 0x1c5c
+#define RADEON_PP_TXOFFSET_1 0x1c74
+#define RADEON_PP_TXOFFSET_2 0x1c8c
+# define RADEON_TXO_ENDIAN_NO_SWAP (0 << 0)
+# define RADEON_TXO_ENDIAN_BYTE_SWAP (1 << 0)
+# define RADEON_TXO_ENDIAN_WORD_SWAP (2 << 0)
+# define RADEON_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
+# define RADEON_TXO_MACRO_LINEAR (0 << 2)
+# define RADEON_TXO_MACRO_TILE (1 << 2)
+# define RADEON_TXO_MICRO_LINEAR (0 << 3)
+# define RADEON_TXO_MICRO_TILE_X2 (1 << 3)
+# define RADEON_TXO_MICRO_TILE_OPT (2 << 3)
+# define RADEON_TXO_OFFSET_MASK 0xffffffe0
+# define RADEON_TXO_OFFSET_SHIFT 5
+
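+/* Illustrative sketch, not part of the original header: PP_TXOFFSET
+ * keeps the texture base address in bits [31:5], so the address must
+ * be 32-byte aligned; the low bits select endian swap and tiling.
+ * The helper name is hypothetical and assumes a linear, pre-aligned
+ * GPU address. */
+static inline unsigned radeon_txoffset_linear(unsigned gpu_addr)
+{
+    return (gpu_addr & RADEON_TXO_OFFSET_MASK)
+         | RADEON_TXO_MACRO_LINEAR
+         | RADEON_TXO_MICRO_LINEAR;
+}
+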
+#define RADEON_PP_CUBIC_OFFSET_T0_0 0x1dd0 /* bits [31:5] */
+#define RADEON_PP_CUBIC_OFFSET_T0_1 0x1dd4
+#define RADEON_PP_CUBIC_OFFSET_T0_2 0x1dd8
+#define RADEON_PP_CUBIC_OFFSET_T0_3 0x1ddc
+#define RADEON_PP_CUBIC_OFFSET_T0_4 0x1de0
+#define RADEON_PP_CUBIC_OFFSET_T1_0 0x1e00
+#define RADEON_PP_CUBIC_OFFSET_T1_1 0x1e04
+#define RADEON_PP_CUBIC_OFFSET_T1_2 0x1e08
+#define RADEON_PP_CUBIC_OFFSET_T1_3 0x1e0c
+#define RADEON_PP_CUBIC_OFFSET_T1_4 0x1e10
+#define RADEON_PP_CUBIC_OFFSET_T2_0 0x1e14
+#define RADEON_PP_CUBIC_OFFSET_T2_1 0x1e18
+#define RADEON_PP_CUBIC_OFFSET_T2_2 0x1e1c
+#define RADEON_PP_CUBIC_OFFSET_T2_3 0x1e20
+#define RADEON_PP_CUBIC_OFFSET_T2_4 0x1e24
+
+#define RADEON_PP_TEX_SIZE_0 0x1d04 /* NPOT */
+#define RADEON_PP_TEX_SIZE_1 0x1d0c
+#define RADEON_PP_TEX_SIZE_2 0x1d14
+# define RADEON_TEX_USIZE_MASK (0x7ff << 0)
+# define RADEON_TEX_USIZE_SHIFT 0
+# define RADEON_TEX_VSIZE_MASK (0x7ff << 16)
+# define RADEON_TEX_VSIZE_SHIFT 16
+# define RADEON_SIGNED_RGB_MASK (1 << 30)
+# define RADEON_SIGNED_RGB_SHIFT 30
+# define RADEON_SIGNED_ALPHA_MASK (1 << 31)
+# define RADEON_SIGNED_ALPHA_SHIFT 31
+#define RADEON_PP_TEX_PITCH_0 0x1d08 /* NPOT */
+#define RADEON_PP_TEX_PITCH_1 0x1d10 /* NPOT */
+#define RADEON_PP_TEX_PITCH_2 0x1d18 /* NPOT */
+/* note: bits [13:5] hold the 32-byte-aligned stride of the texture map */
+
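+/* Illustrative sketch, not part of the original header: packing an
+ * NPOT texture's dimensions into PP_TEX_SIZE.  Programming the fields
+ * as width-1/height-1 follows the Mesa driver's usage and is treated
+ * as an assumption here; the helper name is hypothetical. */
+static inline unsigned radeon_tex_size(unsigned width, unsigned height)
+{
+    return (((width - 1) << RADEON_TEX_USIZE_SHIFT)
+            & RADEON_TEX_USIZE_MASK)
+         | (((height - 1) << RADEON_TEX_VSIZE_SHIFT)
+            & RADEON_TEX_VSIZE_MASK);
+}
+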
+#define RADEON_PP_TXCBLEND_0 0x1c60
+#define RADEON_PP_TXCBLEND_1 0x1c78
+#define RADEON_PP_TXCBLEND_2 0x1c90
+# define RADEON_COLOR_ARG_A_SHIFT 0
+# define RADEON_COLOR_ARG_A_MASK (0x1f << 0)
+# define RADEON_COLOR_ARG_A_ZERO (0 << 0)
+# define RADEON_COLOR_ARG_A_CURRENT_COLOR (2 << 0)
+# define RADEON_COLOR_ARG_A_CURRENT_ALPHA (3 << 0)
+# define RADEON_COLOR_ARG_A_DIFFUSE_COLOR (4 << 0)
+# define RADEON_COLOR_ARG_A_DIFFUSE_ALPHA (5 << 0)
+# define RADEON_COLOR_ARG_A_SPECULAR_COLOR (6 << 0)
+# define RADEON_COLOR_ARG_A_SPECULAR_ALPHA (7 << 0)
+# define RADEON_COLOR_ARG_A_TFACTOR_COLOR (8 << 0)
+# define RADEON_COLOR_ARG_A_TFACTOR_ALPHA (9 << 0)
+# define RADEON_COLOR_ARG_A_T0_COLOR (10 << 0)
+# define RADEON_COLOR_ARG_A_T0_ALPHA (11 << 0)
+# define RADEON_COLOR_ARG_A_T1_COLOR (12 << 0)
+# define RADEON_COLOR_ARG_A_T1_ALPHA (13 << 0)
+# define RADEON_COLOR_ARG_A_T2_COLOR (14 << 0)
+# define RADEON_COLOR_ARG_A_T2_ALPHA (15 << 0)
+# define RADEON_COLOR_ARG_A_T3_COLOR (16 << 0)
+# define RADEON_COLOR_ARG_A_T3_ALPHA (17 << 0)
+# define RADEON_COLOR_ARG_B_SHIFT 5
+# define RADEON_COLOR_ARG_B_MASK (0x1f << 5)
+# define RADEON_COLOR_ARG_B_ZERO (0 << 5)
+# define RADEON_COLOR_ARG_B_CURRENT_COLOR (2 << 5)
+# define RADEON_COLOR_ARG_B_CURRENT_ALPHA (3 << 5)
+# define RADEON_COLOR_ARG_B_DIFFUSE_COLOR (4 << 5)
+# define RADEON_COLOR_ARG_B_DIFFUSE_ALPHA (5 << 5)
+# define RADEON_COLOR_ARG_B_SPECULAR_COLOR (6 << 5)
+# define RADEON_COLOR_ARG_B_SPECULAR_ALPHA (7 << 5)
+# define RADEON_COLOR_ARG_B_TFACTOR_COLOR (8 << 5)
+# define RADEON_COLOR_ARG_B_TFACTOR_ALPHA (9 << 5)
+# define RADEON_COLOR_ARG_B_T0_COLOR (10 << 5)
+# define RADEON_COLOR_ARG_B_T0_ALPHA (11 << 5)
+# define RADEON_COLOR_ARG_B_T1_COLOR (12 << 5)
+# define RADEON_COLOR_ARG_B_T1_ALPHA (13 << 5)
+# define RADEON_COLOR_ARG_B_T2_COLOR (14 << 5)
+# define RADEON_COLOR_ARG_B_T2_ALPHA (15 << 5)
+# define RADEON_COLOR_ARG_B_T3_COLOR (16 << 5)
+# define RADEON_COLOR_ARG_B_T3_ALPHA (17 << 5)
+# define RADEON_COLOR_ARG_C_SHIFT 10
+# define RADEON_COLOR_ARG_C_MASK (0x1f << 10)
+# define RADEON_COLOR_ARG_C_ZERO (0 << 10)
+# define RADEON_COLOR_ARG_C_CURRENT_COLOR (2 << 10)
+# define RADEON_COLOR_ARG_C_CURRENT_ALPHA (3 << 10)
+# define RADEON_COLOR_ARG_C_DIFFUSE_COLOR (4 << 10)
+# define RADEON_COLOR_ARG_C_DIFFUSE_ALPHA (5 << 10)
+# define RADEON_COLOR_ARG_C_SPECULAR_COLOR (6 << 10)
+# define RADEON_COLOR_ARG_C_SPECULAR_ALPHA (7 << 10)
+# define RADEON_COLOR_ARG_C_TFACTOR_COLOR (8 << 10)
+# define RADEON_COLOR_ARG_C_TFACTOR_ALPHA (9 << 10)
+# define RADEON_COLOR_ARG_C_T0_COLOR (10 << 10)
+# define RADEON_COLOR_ARG_C_T0_ALPHA (11 << 10)
+# define RADEON_COLOR_ARG_C_T1_COLOR (12 << 10)
+# define RADEON_COLOR_ARG_C_T1_ALPHA (13 << 10)
+# define RADEON_COLOR_ARG_C_T2_COLOR (14 << 10)
+# define RADEON_COLOR_ARG_C_T2_ALPHA (15 << 10)
+# define RADEON_COLOR_ARG_C_T3_COLOR (16 << 10)
+# define RADEON_COLOR_ARG_C_T3_ALPHA (17 << 10)
+# define RADEON_COMP_ARG_A (1 << 15)
+# define RADEON_COMP_ARG_A_SHIFT 15
+# define RADEON_COMP_ARG_B (1 << 16)
+# define RADEON_COMP_ARG_B_SHIFT 16
+# define RADEON_COMP_ARG_C (1 << 17)
+# define RADEON_COMP_ARG_C_SHIFT 17
+# define RADEON_BLEND_CTL_MASK (7 << 18)
+# define RADEON_BLEND_CTL_ADD (0 << 18)
+# define RADEON_BLEND_CTL_SUBTRACT (1 << 18)
+# define RADEON_BLEND_CTL_ADDSIGNED (2 << 18)
+# define RADEON_BLEND_CTL_BLEND (3 << 18)
+# define RADEON_BLEND_CTL_DOT3 (4 << 18)
+# define RADEON_SCALE_SHIFT 21
+# define RADEON_SCALE_MASK (3 << 21)
+# define RADEON_SCALE_1X (0 << 21)
+# define RADEON_SCALE_2X (1 << 21)
+# define RADEON_SCALE_4X (2 << 21)
+# define RADEON_CLAMP_TX (1 << 23)
+# define RADEON_T0_EQ_TCUR (1 << 24)
+# define RADEON_T1_EQ_TCUR (1 << 25)
+# define RADEON_T2_EQ_TCUR (1 << 26)
+# define RADEON_T3_EQ_TCUR (1 << 27)
+# define RADEON_COLOR_ARG_MASK 0x1f
+# define RADEON_COMP_ARG_SHIFT 15
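+
+/* Illustrative sketch, not part of the original header: assuming the
+ * R100 color combiner evaluates (ARG_A * ARG_B) + ARG_C under
+ * RADEON_BLEND_CTL_ADD, a GL_MODULATE-style stage multiplies the
+ * texel by the incoming color.  The macro name is hypothetical. */
+#define RADEON_EXAMPLE_TXCBLEND_MODULATE     \
+    (RADEON_COLOR_ARG_A_T0_COLOR |           \
+     RADEON_COLOR_ARG_B_CURRENT_COLOR |      \
+     RADEON_COLOR_ARG_C_ZERO |               \
+     RADEON_BLEND_CTL_ADD |                  \
+     RADEON_CLAMP_TX)
+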
+#define RADEON_PP_TXABLEND_0 0x1c64
+#define RADEON_PP_TXABLEND_1 0x1c7c
+#define RADEON_PP_TXABLEND_2 0x1c94
+# define RADEON_ALPHA_ARG_A_SHIFT 0
+# define RADEON_ALPHA_ARG_A_MASK (0xf << 0)
+# define RADEON_ALPHA_ARG_A_ZERO (0 << 0)
+# define RADEON_ALPHA_ARG_A_CURRENT_ALPHA (1 << 0)
+# define RADEON_ALPHA_ARG_A_DIFFUSE_ALPHA (2 << 0)
+# define RADEON_ALPHA_ARG_A_SPECULAR_ALPHA (3 << 0)
+# define RADEON_ALPHA_ARG_A_TFACTOR_ALPHA (4 << 0)
+# define RADEON_ALPHA_ARG_A_T0_ALPHA (5 << 0)
+# define RADEON_ALPHA_ARG_A_T1_ALPHA (6 << 0)
+# define RADEON_ALPHA_ARG_A_T2_ALPHA (7 << 0)
+# define RADEON_ALPHA_ARG_A_T3_ALPHA (8 << 0)
+# define RADEON_ALPHA_ARG_B_SHIFT 4
+# define RADEON_ALPHA_ARG_B_MASK (0xf << 4)
+# define RADEON_ALPHA_ARG_B_ZERO (0 << 4)
+# define RADEON_ALPHA_ARG_B_CURRENT_ALPHA (1 << 4)
+# define RADEON_ALPHA_ARG_B_DIFFUSE_ALPHA (2 << 4)
+# define RADEON_ALPHA_ARG_B_SPECULAR_ALPHA (3 << 4)
+# define RADEON_ALPHA_ARG_B_TFACTOR_ALPHA (4 << 4)
+# define RADEON_ALPHA_ARG_B_T0_ALPHA (5 << 4)
+# define RADEON_ALPHA_ARG_B_T1_ALPHA (6 << 4)
+# define RADEON_ALPHA_ARG_B_T2_ALPHA (7 << 4)
+# define RADEON_ALPHA_ARG_B_T3_ALPHA (8 << 4)
+# define RADEON_ALPHA_ARG_C_SHIFT 8
+# define RADEON_ALPHA_ARG_C_MASK (0xf << 8)
+# define RADEON_ALPHA_ARG_C_ZERO (0 << 8)
+# define RADEON_ALPHA_ARG_C_CURRENT_ALPHA (1 << 8)
+# define RADEON_ALPHA_ARG_C_DIFFUSE_ALPHA (2 << 8)
+# define RADEON_ALPHA_ARG_C_SPECULAR_ALPHA (3 << 8)
+# define RADEON_ALPHA_ARG_C_TFACTOR_ALPHA (4 << 8)
+# define RADEON_ALPHA_ARG_C_T0_ALPHA (5 << 8)
+# define RADEON_ALPHA_ARG_C_T1_ALPHA (6 << 8)
+# define RADEON_ALPHA_ARG_C_T2_ALPHA (7 << 8)
+# define RADEON_ALPHA_ARG_C_T3_ALPHA (8 << 8)
+# define RADEON_DOT_ALPHA_DONT_REPLICATE (1 << 12)
+# define RADEON_ALPHA_ARG_MASK 0xf
+
+#define RADEON_PP_TFACTOR_0 0x1c68
+#define RADEON_PP_TFACTOR_1 0x1c80
+#define RADEON_PP_TFACTOR_2 0x1c98
+
+#define RADEON_RB3D_BLENDCNTL 0x1c20
+# define RADEON_COMB_FCN_MASK (3 << 12)
+# define RADEON_COMB_FCN_ADD_CLAMP (0 << 12)
+# define RADEON_COMB_FCN_ADD_NOCLAMP (1 << 12)
+# define RADEON_COMB_FCN_SUB_CLAMP (2 << 12)
+# define RADEON_COMB_FCN_SUB_NOCLAMP (3 << 12)
+# define RADEON_SRC_BLEND_GL_ZERO (32 << 16)
+# define RADEON_SRC_BLEND_GL_ONE (33 << 16)
+# define RADEON_SRC_BLEND_GL_SRC_COLOR (34 << 16)
+# define RADEON_SRC_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 16)
+# define RADEON_SRC_BLEND_GL_DST_COLOR (36 << 16)
+# define RADEON_SRC_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 16)
+# define RADEON_SRC_BLEND_GL_SRC_ALPHA (38 << 16)
+# define RADEON_SRC_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 16)
+# define RADEON_SRC_BLEND_GL_DST_ALPHA (40 << 16)
+# define RADEON_SRC_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 16)
+# define RADEON_SRC_BLEND_GL_SRC_ALPHA_SATURATE (42 << 16)
+# define RADEON_SRC_BLEND_MASK (63 << 16)
+# define RADEON_DST_BLEND_GL_ZERO (32 << 24)
+# define RADEON_DST_BLEND_GL_ONE (33 << 24)
+# define RADEON_DST_BLEND_GL_SRC_COLOR (34 << 24)
+# define RADEON_DST_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 24)
+# define RADEON_DST_BLEND_GL_DST_COLOR (36 << 24)
+# define RADEON_DST_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 24)
+# define RADEON_DST_BLEND_GL_SRC_ALPHA (38 << 24)
+# define RADEON_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 24)
+# define RADEON_DST_BLEND_GL_DST_ALPHA (40 << 24)
+# define RADEON_DST_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 24)
+# define RADEON_DST_BLEND_MASK (63 << 24)
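+
+/* Illustrative sketch, not part of the original header: classic OpenGL
+ * transparency -- glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
+ * with a clamped add -- expressed with the RB3D_BLENDCNTL fields
+ * above.  The macro name is hypothetical. */
+#define RADEON_EXAMPLE_BLENDCNTL_TRANSPARENCY \
+    (RADEON_COMB_FCN_ADD_CLAMP |              \
+     RADEON_SRC_BLEND_GL_SRC_ALPHA |          \
+     RADEON_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA)
+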
+#define RADEON_RB3D_CNTL 0x1c3c
+# define RADEON_ALPHA_BLEND_ENABLE (1 << 0)
+# define RADEON_PLANE_MASK_ENABLE (1 << 1)
+# define RADEON_DITHER_ENABLE (1 << 2)
+# define RADEON_ROUND_ENABLE (1 << 3)
+# define RADEON_SCALE_DITHER_ENABLE (1 << 4)
+# define RADEON_DITHER_INIT (1 << 5)
+# define RADEON_ROP_ENABLE (1 << 6)
+# define RADEON_STENCIL_ENABLE (1 << 7)
+# define RADEON_Z_ENABLE (1 << 8)
+# define RADEON_DEPTH_XZ_OFFEST_ENABLE (1 << 9)
+# define RADEON_COLOR_FORMAT_ARGB1555 (3 << 10)
+# define RADEON_COLOR_FORMAT_RGB565 (4 << 10)
+# define RADEON_COLOR_FORMAT_ARGB8888 (6 << 10)
+# define RADEON_COLOR_FORMAT_RGB332 (7 << 10)
+# define RADEON_COLOR_FORMAT_Y8 (8 << 10)
+# define RADEON_COLOR_FORMAT_RGB8 (9 << 10)
+# define RADEON_COLOR_FORMAT_YUV422_VYUY (11 << 10)
+# define RADEON_COLOR_FORMAT_YUV422_YVYU (12 << 10)
+# define RADEON_COLOR_FORMAT_aYUV444 (14 << 10)
+# define RADEON_COLOR_FORMAT_ARGB4444 (15 << 10)
+# define RADEON_CLRCMP_FLIP_ENABLE (1 << 14)
+# define RADEON_ZBLOCK16 (1 << 15)
+#define RADEON_RB3D_COLOROFFSET 0x1c40
+# define RADEON_COLOROFFSET_MASK 0xfffffff0
+#define RADEON_RB3D_COLORPITCH 0x1c48
+# define RADEON_COLORPITCH_MASK             0x00001ff8
+# define RADEON_COLOR_TILE_ENABLE (1 << 16)
+# define RADEON_COLOR_MICROTILE_ENABLE (1 << 17)
+# define RADEON_COLOR_ENDIAN_NO_SWAP (0 << 18)
+# define RADEON_COLOR_ENDIAN_WORD_SWAP (1 << 18)
+# define RADEON_COLOR_ENDIAN_DWORD_SWAP (2 << 18)
+#define RADEON_RB3D_DEPTHOFFSET 0x1c24
+#define RADEON_RB3D_DEPTHPITCH 0x1c28
+# define RADEON_DEPTHPITCH_MASK 0x00001ff8
+# define RADEON_DEPTH_HYPERZ (3 << 16)
+# define RADEON_DEPTH_ENDIAN_NO_SWAP (0 << 18)
+# define RADEON_DEPTH_ENDIAN_WORD_SWAP (1 << 18)
+# define RADEON_DEPTH_ENDIAN_DWORD_SWAP (2 << 18)
+#define RADEON_RB3D_PLANEMASK 0x1d84
+#define RADEON_RB3D_ROPCNTL 0x1d80
+# define RADEON_ROP_MASK (15 << 8)
+# define RADEON_ROP_CLEAR (0 << 8)
+# define RADEON_ROP_NOR (1 << 8)
+# define RADEON_ROP_AND_INVERTED (2 << 8)
+# define RADEON_ROP_COPY_INVERTED (3 << 8)
+# define RADEON_ROP_AND_REVERSE (4 << 8)
+# define RADEON_ROP_INVERT (5 << 8)
+# define RADEON_ROP_XOR (6 << 8)
+# define RADEON_ROP_NAND (7 << 8)
+# define RADEON_ROP_AND (8 << 8)
+# define RADEON_ROP_EQUIV (9 << 8)
+# define RADEON_ROP_NOOP (10 << 8)
+# define RADEON_ROP_OR_INVERTED (11 << 8)
+# define RADEON_ROP_COPY (12 << 8)
+# define RADEON_ROP_OR_REVERSE (13 << 8)
+# define RADEON_ROP_OR (14 << 8)
+# define RADEON_ROP_SET (15 << 8)
+#define RADEON_RB3D_STENCILREFMASK 0x1d7c
+# define RADEON_STENCIL_REF_SHIFT 0
+# define RADEON_STENCIL_REF_MASK (0xff << 0)
+# define RADEON_STENCIL_MASK_SHIFT 16
+# define RADEON_STENCIL_VALUE_MASK (0xff << 16)
+# define RADEON_STENCIL_WRITEMASK_SHIFT 24
+# define RADEON_STENCIL_WRITE_MASK (0xff << 24)
+#define RADEON_RB3D_ZPASS_DATA 0x3290
+#define RADEON_RB3D_ZPASS_ADDR 0x3294
+#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c
+# define RADEON_DEPTH_FORMAT_MASK (0xf << 0)
+# define RADEON_DEPTH_FORMAT_16BIT_INT_Z (0 << 0)
+# define RADEON_DEPTH_FORMAT_24BIT_INT_Z (2 << 0)
+# define RADEON_DEPTH_FORMAT_24BIT_FLOAT_Z (3 << 0)
+# define RADEON_DEPTH_FORMAT_32BIT_INT_Z (4 << 0)
+# define RADEON_DEPTH_FORMAT_32BIT_FLOAT_Z (5 << 0)
+# define RADEON_DEPTH_FORMAT_16BIT_FLOAT_W (7 << 0)
+# define RADEON_DEPTH_FORMAT_24BIT_FLOAT_W (9 << 0)
+# define RADEON_DEPTH_FORMAT_32BIT_FLOAT_W (11 << 0)
+# define RADEON_Z_TEST_NEVER (0 << 4)
+# define RADEON_Z_TEST_LESS (1 << 4)
+# define RADEON_Z_TEST_LEQUAL (2 << 4)
+# define RADEON_Z_TEST_EQUAL (3 << 4)
+# define RADEON_Z_TEST_GEQUAL (4 << 4)
+# define RADEON_Z_TEST_GREATER (5 << 4)
+# define RADEON_Z_TEST_NEQUAL (6 << 4)
+# define RADEON_Z_TEST_ALWAYS (7 << 4)
+# define RADEON_Z_TEST_MASK (7 << 4)
+# define RADEON_Z_HIERARCHY_ENABLE (1 << 8)
+# define RADEON_STENCIL_TEST_NEVER (0 << 12)
+# define RADEON_STENCIL_TEST_LESS (1 << 12)
+# define RADEON_STENCIL_TEST_LEQUAL (2 << 12)
+# define RADEON_STENCIL_TEST_EQUAL (3 << 12)
+# define RADEON_STENCIL_TEST_GEQUAL (4 << 12)
+# define RADEON_STENCIL_TEST_GREATER (5 << 12)
+# define RADEON_STENCIL_TEST_NEQUAL (6 << 12)
+# define RADEON_STENCIL_TEST_ALWAYS (7 << 12)
+# define RADEON_STENCIL_TEST_MASK (0x7 << 12)
+# define RADEON_STENCIL_FAIL_KEEP (0 << 16)
+# define RADEON_STENCIL_FAIL_ZERO (1 << 16)
+# define RADEON_STENCIL_FAIL_REPLACE (2 << 16)
+# define RADEON_STENCIL_FAIL_INC (3 << 16)
+# define RADEON_STENCIL_FAIL_DEC (4 << 16)
+# define RADEON_STENCIL_FAIL_INVERT (5 << 16)
+# define RADEON_STENCIL_FAIL_INC_WRAP (6 << 16)
+# define RADEON_STENCIL_FAIL_DEC_WRAP (7 << 16)
+# define RADEON_STENCIL_FAIL_MASK (0x7 << 16)
+# define RADEON_STENCIL_ZPASS_KEEP (0 << 20)
+# define RADEON_STENCIL_ZPASS_ZERO (1 << 20)
+# define RADEON_STENCIL_ZPASS_REPLACE (2 << 20)
+# define RADEON_STENCIL_ZPASS_INC (3 << 20)
+# define RADEON_STENCIL_ZPASS_DEC (4 << 20)
+# define RADEON_STENCIL_ZPASS_INVERT (5 << 20)
+# define RADEON_STENCIL_ZPASS_INC_WRAP (6 << 20)
+# define RADEON_STENCIL_ZPASS_DEC_WRAP (7 << 20)
+# define RADEON_STENCIL_ZPASS_MASK (0x7 << 20)
+# define RADEON_STENCIL_ZFAIL_KEEP (0 << 24)
+# define RADEON_STENCIL_ZFAIL_ZERO (1 << 24)
+# define RADEON_STENCIL_ZFAIL_REPLACE (2 << 24)
+# define RADEON_STENCIL_ZFAIL_INC (3 << 24)
+# define RADEON_STENCIL_ZFAIL_DEC (4 << 24)
+# define RADEON_STENCIL_ZFAIL_INVERT (5 << 24)
+# define RADEON_STENCIL_ZFAIL_INC_WRAP (6 << 24)
+# define RADEON_STENCIL_ZFAIL_DEC_WRAP (7 << 24)
+# define RADEON_STENCIL_ZFAIL_MASK (0x7 << 24)
+# define RADEON_Z_COMPRESSION_ENABLE (1 << 28)
+# define RADEON_FORCE_Z_DIRTY (1 << 29)
+# define RADEON_Z_WRITE_ENABLE (1 << 30)
+# define RADEON_Z_DECOMPRESSION_ENABLE (1 << 31)
+
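+/* Illustrative sketch, not part of the original header: a common
+ * RB3D_ZSTENCILCNTL value -- 24-bit integer Z, GL_LEQUAL depth test,
+ * depth writes on, stencil passing through with "keep" on every path.
+ * The macro name is hypothetical. */
+#define RADEON_EXAMPLE_ZSTENCIL_LEQUAL  \
+    (RADEON_DEPTH_FORMAT_24BIT_INT_Z |  \
+     RADEON_Z_TEST_LEQUAL |             \
+     RADEON_STENCIL_TEST_ALWAYS |       \
+     RADEON_STENCIL_FAIL_KEEP |         \
+     RADEON_STENCIL_ZPASS_KEEP |        \
+     RADEON_STENCIL_ZFAIL_KEEP |        \
+     RADEON_Z_WRITE_ENABLE)
+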
+#define RADEON_RE_STIPPLE_ADDR 0x1cc8
+#define RADEON_RE_STIPPLE_DATA 0x1ccc
+#define RADEON_RE_LINE_PATTERN 0x1cd0
+# define RADEON_LINE_PATTERN_MASK 0x0000ffff
+# define RADEON_LINE_REPEAT_COUNT_SHIFT 16
+# define RADEON_LINE_PATTERN_START_SHIFT 24
+# define RADEON_LINE_PATTERN_LITTLE_BIT_ORDER (0 << 28)
+# define RADEON_LINE_PATTERN_BIG_BIT_ORDER (1 << 28)
+# define RADEON_LINE_PATTERN_AUTO_RESET (1 << 29)
+#define RADEON_RE_LINE_STATE 0x1cd4
+# define RADEON_LINE_CURRENT_PTR_SHIFT 0
+# define RADEON_LINE_CURRENT_COUNT_SHIFT 8
+#define RADEON_RE_MISC 0x26c4
+# define RADEON_STIPPLE_COORD_MASK 0x1f
+# define RADEON_STIPPLE_X_OFFSET_SHIFT 0
+# define RADEON_STIPPLE_X_OFFSET_MASK (0x1f << 0)
+# define RADEON_STIPPLE_Y_OFFSET_SHIFT 8
+# define RADEON_STIPPLE_Y_OFFSET_MASK (0x1f << 8)
+# define RADEON_STIPPLE_LITTLE_BIT_ORDER (0 << 16)
+# define RADEON_STIPPLE_BIG_BIT_ORDER (1 << 16)
+#define RADEON_RE_SOLID_COLOR 0x1c1c
+#define RADEON_RE_TOP_LEFT 0x26c0
+# define RADEON_RE_LEFT_SHIFT 0
+# define RADEON_RE_TOP_SHIFT 16
+#define RADEON_RE_WIDTH_HEIGHT 0x1c44
+# define RADEON_RE_WIDTH_SHIFT 0
+# define RADEON_RE_HEIGHT_SHIFT 16
+
+#define RADEON_SE_CNTL 0x1c4c
+# define RADEON_FFACE_CULL_CW (0 << 0)
+# define RADEON_FFACE_CULL_CCW (1 << 0)
+# define RADEON_FFACE_CULL_DIR_MASK (1 << 0)
+# define RADEON_BFACE_CULL (0 << 1)
+# define RADEON_BFACE_SOLID (3 << 1)
+# define RADEON_FFACE_CULL (0 << 3)
+# define RADEON_FFACE_SOLID (3 << 3)
+# define RADEON_FFACE_CULL_MASK (3 << 3)
+# define RADEON_BADVTX_CULL_DISABLE (1 << 5)
+# define RADEON_FLAT_SHADE_VTX_0 (0 << 6)
+# define RADEON_FLAT_SHADE_VTX_1 (1 << 6)
+# define RADEON_FLAT_SHADE_VTX_2 (2 << 6)
+# define RADEON_FLAT_SHADE_VTX_LAST (3 << 6)
+# define RADEON_DIFFUSE_SHADE_SOLID (0 << 8)
+# define RADEON_DIFFUSE_SHADE_FLAT (1 << 8)
+# define RADEON_DIFFUSE_SHADE_GOURAUD (2 << 8)
+# define RADEON_DIFFUSE_SHADE_MASK (3 << 8)
+# define RADEON_ALPHA_SHADE_SOLID (0 << 10)
+# define RADEON_ALPHA_SHADE_FLAT (1 << 10)
+# define RADEON_ALPHA_SHADE_GOURAUD (2 << 10)
+# define RADEON_ALPHA_SHADE_MASK (3 << 10)
+# define RADEON_SPECULAR_SHADE_SOLID (0 << 12)
+# define RADEON_SPECULAR_SHADE_FLAT (1 << 12)
+# define RADEON_SPECULAR_SHADE_GOURAUD (2 << 12)
+# define RADEON_SPECULAR_SHADE_MASK (3 << 12)
+# define RADEON_FOG_SHADE_SOLID (0 << 14)
+# define RADEON_FOG_SHADE_FLAT (1 << 14)
+# define RADEON_FOG_SHADE_GOURAUD (2 << 14)
+# define RADEON_FOG_SHADE_MASK (3 << 14)
+# define RADEON_ZBIAS_ENABLE_POINT (1 << 16)
+# define RADEON_ZBIAS_ENABLE_LINE (1 << 17)
+# define RADEON_ZBIAS_ENABLE_TRI (1 << 18)
+# define RADEON_WIDELINE_ENABLE (1 << 20)
+# define RADEON_VPORT_XY_XFORM_ENABLE (1 << 24)
+# define RADEON_VPORT_Z_XFORM_ENABLE (1 << 25)
+# define RADEON_VTX_PIX_CENTER_D3D (0 << 27)
+# define RADEON_VTX_PIX_CENTER_OGL (1 << 27)
+# define RADEON_ROUND_MODE_TRUNC (0 << 28)
+# define RADEON_ROUND_MODE_ROUND (1 << 28)
+# define RADEON_ROUND_MODE_ROUND_EVEN (2 << 28)
+# define RADEON_ROUND_MODE_ROUND_ODD (3 << 28)
+# define RADEON_ROUND_PREC_16TH_PIX (0 << 30)
+# define RADEON_ROUND_PREC_8TH_PIX (1 << 30)
+# define RADEON_ROUND_PREC_4TH_PIX (2 << 30)
+# define RADEON_ROUND_PREC_HALF_PIX (3 << 30)
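+
+/* Illustrative sketch, not part of the original header: an SE_CNTL
+ * value for Gouraud shading with both faces drawn solid (no culling)
+ * and OpenGL pixel centers.  The macro name is hypothetical. */
+#define RADEON_EXAMPLE_SE_CNTL_GOURAUD \
+    (RADEON_FFACE_CULL_CCW |           \
+     RADEON_BFACE_SOLID |              \
+     RADEON_FFACE_SOLID |              \
+     RADEON_DIFFUSE_SHADE_GOURAUD |    \
+     RADEON_ALPHA_SHADE_GOURAUD |      \
+     RADEON_SPECULAR_SHADE_GOURAUD |   \
+     RADEON_FOG_SHADE_GOURAUD |        \
+     RADEON_VTX_PIX_CENTER_OGL)
+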
+#define RADEON_SE_CNTL_STATUS 0x2140
+# define RADEON_VC_NO_SWAP (0 << 0)
+# define RADEON_VC_16BIT_SWAP (1 << 0)
+# define RADEON_VC_32BIT_SWAP (2 << 0)
+# define RADEON_VC_HALF_DWORD_SWAP (3 << 0)
+# define RADEON_TCL_BYPASS (1 << 8)
+#define RADEON_SE_COORD_FMT 0x1c50
+# define RADEON_VTX_XY_PRE_MULT_1_OVER_W0 (1 << 0)
+# define RADEON_VTX_Z_PRE_MULT_1_OVER_W0 (1 << 1)
+# define RADEON_VTX_ST0_NONPARAMETRIC (1 << 8)
+# define RADEON_VTX_ST1_NONPARAMETRIC (1 << 9)
+# define RADEON_VTX_ST2_NONPARAMETRIC (1 << 10)
+# define RADEON_VTX_ST3_NONPARAMETRIC (1 << 11)
+# define RADEON_VTX_W0_NORMALIZE (1 << 12)
+# define RADEON_VTX_W0_IS_NOT_1_OVER_W0 (1 << 16)
+# define RADEON_VTX_ST0_PRE_MULT_1_OVER_W0 (1 << 17)
+# define RADEON_VTX_ST1_PRE_MULT_1_OVER_W0 (1 << 19)
+# define RADEON_VTX_ST2_PRE_MULT_1_OVER_W0 (1 << 21)
+# define RADEON_VTX_ST3_PRE_MULT_1_OVER_W0 (1 << 23)
+# define RADEON_TEX1_W_ROUTING_USE_W0 (0 << 26)
+# define RADEON_TEX1_W_ROUTING_USE_Q1 (1 << 26)
+#define RADEON_SE_LINE_WIDTH 0x1db8
+#define RADEON_SE_TCL_LIGHT_MODEL_CTL 0x226c
+# define RADEON_LIGHTING_ENABLE (1 << 0)
+# define RADEON_LIGHT_IN_MODELSPACE (1 << 1)
+# define RADEON_LOCAL_VIEWER (1 << 2)
+# define RADEON_NORMALIZE_NORMALS (1 << 3)
+# define RADEON_RESCALE_NORMALS (1 << 4)
+# define RADEON_SPECULAR_LIGHTS (1 << 5)
+# define RADEON_DIFFUSE_SPECULAR_COMBINE (1 << 6)
+# define RADEON_LIGHT_ALPHA (1 << 7)
+# define RADEON_LOCAL_LIGHT_VEC_GL (1 << 8)
+# define RADEON_LIGHT_NO_NORMAL_AMBIENT_ONLY (1 << 9)
+# define RADEON_LM_SOURCE_STATE_PREMULT 0
+# define RADEON_LM_SOURCE_STATE_MULT 1
+# define RADEON_LM_SOURCE_VERTEX_DIFFUSE 2
+# define RADEON_LM_SOURCE_VERTEX_SPECULAR 3
+# define RADEON_EMISSIVE_SOURCE_SHIFT 16
+# define RADEON_AMBIENT_SOURCE_SHIFT 18
+# define RADEON_DIFFUSE_SOURCE_SHIFT 20
+# define RADEON_SPECULAR_SOURCE_SHIFT 22
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_RED 0x2220
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_GREEN 0x2224
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_BLUE 0x2228
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_ALPHA 0x222c
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_RED 0x2230
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_GREEN 0x2234
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_BLUE 0x2238
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_ALPHA 0x223c
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED 0x2210
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_GREEN 0x2214
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_BLUE 0x2218
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_ALPHA 0x221c
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_RED 0x2240
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_GREEN 0x2244
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_BLUE 0x2248
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_ALPHA 0x224c
+#define RADEON_SE_TCL_MATRIX_SELECT_0 0x225c
+# define RADEON_MODELVIEW_0_SHIFT 0
+# define RADEON_MODELVIEW_1_SHIFT 4
+# define RADEON_MODELVIEW_2_SHIFT 8
+# define RADEON_MODELVIEW_3_SHIFT 12
+# define RADEON_IT_MODELVIEW_0_SHIFT 16
+# define RADEON_IT_MODELVIEW_1_SHIFT 20
+# define RADEON_IT_MODELVIEW_2_SHIFT 24
+# define RADEON_IT_MODELVIEW_3_SHIFT 28
+#define RADEON_SE_TCL_MATRIX_SELECT_1 0x2260
+# define RADEON_MODELPROJECT_0_SHIFT 0
+# define RADEON_MODELPROJECT_1_SHIFT 4
+# define RADEON_MODELPROJECT_2_SHIFT 8
+# define RADEON_MODELPROJECT_3_SHIFT 12
+# define RADEON_TEXMAT_0_SHIFT 16
+# define RADEON_TEXMAT_1_SHIFT 20
+# define RADEON_TEXMAT_2_SHIFT 24
+# define RADEON_TEXMAT_3_SHIFT 28
+
+
+#define RADEON_SE_TCL_OUTPUT_VTX_FMT 0x2254
+# define RADEON_TCL_VTX_W0 (1 << 0)
+# define RADEON_TCL_VTX_FP_DIFFUSE (1 << 1)
+# define RADEON_TCL_VTX_FP_ALPHA (1 << 2)
+# define RADEON_TCL_VTX_PK_DIFFUSE (1 << 3)
+# define RADEON_TCL_VTX_FP_SPEC (1 << 4)
+# define RADEON_TCL_VTX_FP_FOG (1 << 5)
+# define RADEON_TCL_VTX_PK_SPEC (1 << 6)
+# define RADEON_TCL_VTX_ST0 (1 << 7)
+# define RADEON_TCL_VTX_ST1 (1 << 8)
+# define RADEON_TCL_VTX_Q1 (1 << 9)
+# define RADEON_TCL_VTX_ST2 (1 << 10)
+# define RADEON_TCL_VTX_Q2 (1 << 11)
+# define RADEON_TCL_VTX_ST3 (1 << 12)
+# define RADEON_TCL_VTX_Q3 (1 << 13)
+# define RADEON_TCL_VTX_Q0 (1 << 14)
+# define RADEON_TCL_VTX_WEIGHT_COUNT_SHIFT 15
+# define RADEON_TCL_VTX_NORM0 (1 << 18)
+# define RADEON_TCL_VTX_XY1 (1 << 27)
+# define RADEON_TCL_VTX_Z1 (1 << 28)
+# define RADEON_TCL_VTX_W1 (1 << 29)
+# define RADEON_TCL_VTX_NORM1 (1 << 30)
+# define RADEON_TCL_VTX_Z0 (1 << 31)
+
+#define RADEON_SE_TCL_OUTPUT_VTX_SEL 0x2258
+# define RADEON_TCL_COMPUTE_XYZW (1 << 0)
+# define RADEON_TCL_COMPUTE_DIFFUSE (1 << 1)
+# define RADEON_TCL_COMPUTE_SPECULAR (1 << 2)
+# define RADEON_TCL_FORCE_NAN_IF_COLOR_NAN (1 << 3)
+# define RADEON_TCL_FORCE_INORDER_PROC (1 << 4)
+# define RADEON_TCL_TEX_INPUT_TEX_0 0
+# define RADEON_TCL_TEX_INPUT_TEX_1 1
+# define RADEON_TCL_TEX_INPUT_TEX_2 2
+# define RADEON_TCL_TEX_INPUT_TEX_3 3
+# define RADEON_TCL_TEX_COMPUTED_TEX_0 8
+# define RADEON_TCL_TEX_COMPUTED_TEX_1 9
+# define RADEON_TCL_TEX_COMPUTED_TEX_2 10
+# define RADEON_TCL_TEX_COMPUTED_TEX_3 11
+# define RADEON_TCL_TEX_0_OUTPUT_SHIFT 16
+# define RADEON_TCL_TEX_1_OUTPUT_SHIFT 20
+# define RADEON_TCL_TEX_2_OUTPUT_SHIFT 24
+# define RADEON_TCL_TEX_3_OUTPUT_SHIFT 28
+
+#define RADEON_SE_TCL_PER_LIGHT_CTL_0 0x2270
+# define RADEON_LIGHT_0_ENABLE (1 << 0)
+# define RADEON_LIGHT_0_ENABLE_AMBIENT (1 << 1)
+# define RADEON_LIGHT_0_ENABLE_SPECULAR (1 << 2)
+# define RADEON_LIGHT_0_IS_LOCAL (1 << 3)
+# define RADEON_LIGHT_0_IS_SPOT (1 << 4)
+# define RADEON_LIGHT_0_DUAL_CONE (1 << 5)
+# define RADEON_LIGHT_0_ENABLE_RANGE_ATTEN (1 << 6)
+# define RADEON_LIGHT_0_CONSTANT_RANGE_ATTEN (1 << 7)
+# define RADEON_LIGHT_0_SHIFT 0
+# define RADEON_LIGHT_1_ENABLE (1 << 16)
+# define RADEON_LIGHT_1_ENABLE_AMBIENT (1 << 17)
+# define RADEON_LIGHT_1_ENABLE_SPECULAR (1 << 18)
+# define RADEON_LIGHT_1_IS_LOCAL (1 << 19)
+# define RADEON_LIGHT_1_IS_SPOT (1 << 20)
+# define RADEON_LIGHT_1_DUAL_CONE (1 << 21)
+# define RADEON_LIGHT_1_ENABLE_RANGE_ATTEN (1 << 22)
+# define RADEON_LIGHT_1_CONSTANT_RANGE_ATTEN (1 << 23)
+# define RADEON_LIGHT_1_SHIFT 16
+#define RADEON_SE_TCL_PER_LIGHT_CTL_1 0x2274
+# define RADEON_LIGHT_2_SHIFT 0
+# define RADEON_LIGHT_3_SHIFT 16
+#define RADEON_SE_TCL_PER_LIGHT_CTL_2 0x2278
+# define RADEON_LIGHT_4_SHIFT 0
+# define RADEON_LIGHT_5_SHIFT 16
+#define RADEON_SE_TCL_PER_LIGHT_CTL_3 0x227c
+# define RADEON_LIGHT_6_SHIFT 0
+# define RADEON_LIGHT_7_SHIFT 16
+
+#define RADEON_SE_TCL_STATE_FLUSH 0x2284
+
+#define RADEON_SE_TCL_SHININESS 0x2250
+
+#define RADEON_SE_TCL_TEXTURE_PROC_CTL 0x2268
+# define RADEON_TEXGEN_TEXMAT_0_ENABLE (1 << 0)
+# define RADEON_TEXGEN_TEXMAT_1_ENABLE (1 << 1)
+# define RADEON_TEXGEN_TEXMAT_2_ENABLE (1 << 2)
+# define RADEON_TEXGEN_TEXMAT_3_ENABLE (1 << 3)
+# define RADEON_TEXMAT_0_ENABLE (1 << 4)
+# define RADEON_TEXMAT_1_ENABLE (1 << 5)
+# define RADEON_TEXMAT_2_ENABLE (1 << 6)
+# define RADEON_TEXMAT_3_ENABLE (1 << 7)
+# define RADEON_TEXGEN_INPUT_MASK 0xf
+# define RADEON_TEXGEN_INPUT_TEXCOORD_0 0
+# define RADEON_TEXGEN_INPUT_TEXCOORD_1 1
+# define RADEON_TEXGEN_INPUT_TEXCOORD_2 2
+# define RADEON_TEXGEN_INPUT_TEXCOORD_3 3
+# define RADEON_TEXGEN_INPUT_OBJ 4
+# define RADEON_TEXGEN_INPUT_EYE 5
+# define RADEON_TEXGEN_INPUT_EYE_NORMAL 6
+# define RADEON_TEXGEN_INPUT_EYE_REFLECT 7
+# define RADEON_TEXGEN_INPUT_EYE_NORMALIZED 8
+# define RADEON_TEXGEN_0_INPUT_SHIFT 16
+# define RADEON_TEXGEN_1_INPUT_SHIFT 20
+# define RADEON_TEXGEN_2_INPUT_SHIFT 24
+# define RADEON_TEXGEN_3_INPUT_SHIFT 28
+
+#define RADEON_SE_TCL_UCP_VERT_BLEND_CTL 0x2264
+# define RADEON_UCP_IN_CLIP_SPACE (1 << 0)
+# define RADEON_UCP_IN_MODEL_SPACE (1 << 1)
+# define RADEON_UCP_ENABLE_0 (1 << 2)
+# define RADEON_UCP_ENABLE_1 (1 << 3)
+# define RADEON_UCP_ENABLE_2 (1 << 4)
+# define RADEON_UCP_ENABLE_3 (1 << 5)
+# define RADEON_UCP_ENABLE_4 (1 << 6)
+# define RADEON_UCP_ENABLE_5 (1 << 7)
+# define RADEON_TCL_FOG_MASK (3 << 8)
+# define RADEON_TCL_FOG_DISABLE (0 << 8)
+# define RADEON_TCL_FOG_EXP (1 << 8)
+# define RADEON_TCL_FOG_EXP2 (2 << 8)
+# define RADEON_TCL_FOG_LINEAR (3 << 8)
+# define RADEON_RNG_BASED_FOG (1 << 10)
+# define RADEON_LIGHT_TWOSIDE (1 << 11)
+# define RADEON_BLEND_OP_COUNT_MASK (7 << 12)
+# define RADEON_BLEND_OP_COUNT_SHIFT 12
+# define RADEON_POSITION_BLEND_OP_ENABLE (1 << 16)
+# define RADEON_NORMAL_BLEND_OP_ENABLE (1 << 17)
+# define RADEON_VERTEX_BLEND_SRC_0_PRIMARY (0 << 18)
+# define RADEON_VERTEX_BLEND_SRC_0_SECONDARY (1 << 18)
+# define RADEON_VERTEX_BLEND_SRC_1_PRIMARY (0 << 19)
+# define RADEON_VERTEX_BLEND_SRC_1_SECONDARY (1 << 19)
+# define RADEON_VERTEX_BLEND_SRC_2_PRIMARY (0 << 20)
+# define RADEON_VERTEX_BLEND_SRC_2_SECONDARY (1 << 20)
+# define RADEON_VERTEX_BLEND_SRC_3_PRIMARY (0 << 21)
+# define RADEON_VERTEX_BLEND_SRC_3_SECONDARY (1 << 21)
+# define RADEON_VERTEX_BLEND_WGT_MINUS_ONE (1 << 22)
+# define RADEON_CULL_FRONT_IS_CW (0 << 28)
+# define RADEON_CULL_FRONT_IS_CCW (1 << 28)
+# define RADEON_CULL_FRONT (1 << 29)
+# define RADEON_CULL_BACK (1 << 30)
+# define RADEON_FORCE_W_TO_ONE (1 << 31)
+
+#define RADEON_SE_VPORT_XSCALE 0x1d98
+#define RADEON_SE_VPORT_XOFFSET 0x1d9c
+#define RADEON_SE_VPORT_YSCALE 0x1da0
+#define RADEON_SE_VPORT_YOFFSET 0x1da4
+#define RADEON_SE_VPORT_ZSCALE 0x1da8
+#define RADEON_SE_VPORT_ZOFFSET 0x1dac
+#define RADEON_SE_ZBIAS_FACTOR 0x1db0
+#define RADEON_SE_ZBIAS_CONSTANT 0x1db4
+
+#define RADEON_SE_VTX_FMT 0x2080
+# define RADEON_SE_VTX_FMT_XY 0x00000000
+# define RADEON_SE_VTX_FMT_W0 0x00000001
+# define RADEON_SE_VTX_FMT_FPCOLOR 0x00000002
+# define RADEON_SE_VTX_FMT_FPALPHA 0x00000004
+# define RADEON_SE_VTX_FMT_PKCOLOR 0x00000008
+# define RADEON_SE_VTX_FMT_FPSPEC 0x00000010
+# define RADEON_SE_VTX_FMT_FPFOG 0x00000020
+# define RADEON_SE_VTX_FMT_PKSPEC 0x00000040
+# define RADEON_SE_VTX_FMT_ST0 0x00000080
+# define RADEON_SE_VTX_FMT_ST1 0x00000100
+# define RADEON_SE_VTX_FMT_Q1 0x00000200
+# define RADEON_SE_VTX_FMT_ST2 0x00000400
+# define RADEON_SE_VTX_FMT_Q2 0x00000800
+# define RADEON_SE_VTX_FMT_ST3 0x00001000
+# define RADEON_SE_VTX_FMT_Q3 0x00002000
+# define RADEON_SE_VTX_FMT_Q0 0x00004000
+# define RADEON_SE_VTX_FMT_BLND_WEIGHT_CNT_MASK 0x00038000
+# define RADEON_SE_VTX_FMT_N0 0x00040000
+# define RADEON_SE_VTX_FMT_XY1 0x08000000
+# define RADEON_SE_VTX_FMT_Z1 0x10000000
+# define RADEON_SE_VTX_FMT_W1 0x20000000
+# define RADEON_SE_VTX_FMT_N1 0x40000000
+# define RADEON_SE_VTX_FMT_Z 0x80000000
+
+ /* Registers for CP and Microcode Engine */
+#define RADEON_CP_ME_RAM_ADDR 0x07d4
+#define RADEON_CP_ME_RAM_RADDR 0x07d8
+#define RADEON_CP_ME_RAM_DATAH 0x07dc
+#define RADEON_CP_ME_RAM_DATAL 0x07e0
+
+#define RADEON_CP_RB_BASE 0x0700
+#define RADEON_CP_RB_CNTL 0x0704
+#define RADEON_CP_RB_RPTR_ADDR 0x070c
+#define RADEON_CP_RB_RPTR 0x0710
+#define RADEON_CP_RB_WPTR 0x0714
+
+#define RADEON_CP_IB_BASE 0x0738
+#define RADEON_CP_IB_BUFSZ 0x073c
+
+#define RADEON_CP_CSQ_CNTL 0x0740
+# define RADEON_CSQ_CNT_PRIMARY_MASK (0xff << 0)
+# define RADEON_CSQ_PRIDIS_INDDIS (0 << 28)
+# define RADEON_CSQ_PRIPIO_INDDIS (1 << 28)
+# define RADEON_CSQ_PRIBM_INDDIS (2 << 28)
+# define RADEON_CSQ_PRIPIO_INDBM (3 << 28)
+# define RADEON_CSQ_PRIBM_INDBM (4 << 28)
+# define RADEON_CSQ_PRIPIO_INDPIO (15 << 28)
+#define RADEON_CP_CSQ_STAT 0x07f8
+# define RADEON_CSQ_RPTR_PRIMARY_MASK (0xff << 0)
+# define RADEON_CSQ_WPTR_PRIMARY_MASK (0xff << 8)
+# define RADEON_CSQ_RPTR_INDIRECT_MASK (0xff << 16)
+# define RADEON_CSQ_WPTR_INDIRECT_MASK (0xff << 24)
+#define RADEON_CP_CSQ_ADDR 0x07f0
+#define RADEON_CP_CSQ_DATA 0x07f4
+#define RADEON_CP_CSQ_APER_PRIMARY 0x1000
+#define RADEON_CP_CSQ_APER_INDIRECT 0x1300
+
+#define RADEON_CP_RB_WPTR_DELAY 0x0718
+# define RADEON_PRE_WRITE_TIMER_SHIFT 0
+# define RADEON_PRE_WRITE_LIMIT_SHIFT 23
+
+#define RADEON_AIC_CNTL 0x01d0
+# define RADEON_PCIGART_TRANSLATE_EN (1 << 0)
+#define RADEON_AIC_LO_ADDR 0x01dc
+
+
+
+ /* Constants */
+#define RADEON_LAST_FRAME_REG RADEON_GUI_SCRATCH_REG0
+#define RADEON_LAST_CLEAR_REG RADEON_GUI_SCRATCH_REG2
+
+
+
+ /* CP packet types */
+#define RADEON_CP_PACKET0 0x00000000
+#define RADEON_CP_PACKET1 0x40000000
+#define RADEON_CP_PACKET2 0x80000000
+#define RADEON_CP_PACKET3 0xC0000000
+# define RADEON_CP_PACKET_MASK 0xC0000000
+# define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000
+# define RADEON_CP_PACKET_MAX_DWORDS (1 << 12)
+# define RADEON_CP_PACKET0_REG_MASK 0x000007ff
+# define RADEON_CP_PACKET1_REG0_MASK 0x000007ff
+# define RADEON_CP_PACKET1_REG1_MASK 0x003ff800
+
+#define RADEON_CP_PACKET0_ONE_REG_WR 0x00008000
+
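+/* Illustrative sketch, not part of the original header: a Type-0
+ * packet header carries the register offset divided by four in the
+ * low bits and the dword count minus one in bits [29:16], so `n`
+ * here is "registers to write minus one" (mirroring the DRM's
+ * CP_PACKET0 convention, treated as an assumption).  The macro name
+ * is hypothetical. */
+#define RADEON_EXAMPLE_CP_PACKET0(reg, n) \
+    (RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
+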
+#define RADEON_CP_PACKET3_NOP 0xC0001000
+#define RADEON_CP_PACKET3_NEXT_CHAR 0xC0001900
+#define RADEON_CP_PACKET3_PLY_NEXTSCAN 0xC0001D00
+#define RADEON_CP_PACKET3_SET_SCISSORS 0xC0001E00
+#define RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM 0xC0002300
+#define RADEON_CP_PACKET3_LOAD_MICROCODE 0xC0002400
+#define RADEON_CP_PACKET3_WAIT_FOR_IDLE 0xC0002600
+#define RADEON_CP_PACKET3_3D_DRAW_VBUF 0xC0002800
+#define RADEON_CP_PACKET3_3D_DRAW_IMMD 0xC0002900
+#define RADEON_CP_PACKET3_3D_DRAW_INDX 0xC0002A00
+#define RADEON_CP_PACKET3_LOAD_PALETTE 0xC0002C00
+#define RADEON_CP_PACKET3_3D_LOAD_VBPNTR 0xC0002F00
+#define R200_CP_CMD_3D_DRAW_VBUF_2 0xC0003400
+#define R200_CP_CMD_3D_DRAW_IMMD_2 0xC0003500
+#define R200_CP_CMD_3D_DRAW_INDX_2 0xC0003600
+#define RADEON_CP_PACKET3_CNTL_PAINT 0xC0009100
+#define RADEON_CP_PACKET3_CNTL_BITBLT 0xC0009200
+#define RADEON_CP_PACKET3_CNTL_SMALLTEXT 0xC0009300
+#define RADEON_CP_PACKET3_CNTL_HOSTDATA_BLT 0xC0009400
+#define RADEON_CP_PACKET3_CNTL_POLYLINE 0xC0009500
+#define RADEON_CP_PACKET3_CNTL_POLYSCANLINES 0xC0009800
+#define RADEON_CP_PACKET3_CNTL_PAINT_MULTI 0xC0009A00
+#define RADEON_CP_PACKET3_CNTL_BITBLT_MULTI 0xC0009B00
+#define RADEON_CP_PACKET3_CNTL_TRANS_BITBLT 0xC0009C00
+
+
+#define RADEON_CP_VC_FRMT_XY 0x00000000
+#define RADEON_CP_VC_FRMT_W0 0x00000001
+#define RADEON_CP_VC_FRMT_FPCOLOR 0x00000002
+#define RADEON_CP_VC_FRMT_FPALPHA 0x00000004
+#define RADEON_CP_VC_FRMT_PKCOLOR 0x00000008
+#define RADEON_CP_VC_FRMT_FPSPEC 0x00000010
+#define RADEON_CP_VC_FRMT_FPFOG 0x00000020
+#define RADEON_CP_VC_FRMT_PKSPEC 0x00000040
+#define RADEON_CP_VC_FRMT_ST0 0x00000080
+#define RADEON_CP_VC_FRMT_ST1 0x00000100
+#define RADEON_CP_VC_FRMT_Q1 0x00000200
+#define RADEON_CP_VC_FRMT_ST2 0x00000400
+#define RADEON_CP_VC_FRMT_Q2 0x00000800
+#define RADEON_CP_VC_FRMT_ST3 0x00001000
+#define RADEON_CP_VC_FRMT_Q3 0x00002000
+#define RADEON_CP_VC_FRMT_Q0 0x00004000
+#define RADEON_CP_VC_FRMT_BLND_WEIGHT_CNT_MASK 0x00038000
+#define RADEON_CP_VC_FRMT_N0 0x00040000
+#define RADEON_CP_VC_FRMT_XY1 0x08000000
+#define RADEON_CP_VC_FRMT_Z1 0x10000000
+#define RADEON_CP_VC_FRMT_W1 0x20000000
+#define RADEON_CP_VC_FRMT_N1 0x40000000
+#define RADEON_CP_VC_FRMT_Z 0x80000000
+
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_NONE 0x00000000
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_POINT 0x00000001
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_LINE 0x00000002
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_LINE_STRIP 0x00000003
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST 0x00000004
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_FAN 0x00000005
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_STRIP 0x00000006
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_TYPE_2 0x00000007
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_RECT_LIST 0x00000008
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_3VRT_POINT_LIST 0x00000009
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_3VRT_LINE_LIST 0x0000000a
+#define RADEON_CP_VC_CNTL_PRIM_WALK_IND 0x00000010
+#define RADEON_CP_VC_CNTL_PRIM_WALK_LIST 0x00000020
+#define RADEON_CP_VC_CNTL_PRIM_WALK_RING 0x00000030
+#define RADEON_CP_VC_CNTL_COLOR_ORDER_BGRA 0x00000000
+#define RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA 0x00000040
+#define RADEON_CP_VC_CNTL_MAOS_ENABLE 0x00000080
+#define RADEON_CP_VC_CNTL_VTX_FMT_NON_RADEON_MODE 0x00000000
+#define RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE 0x00000100
+#define RADEON_CP_VC_CNTL_TCL_DISABLE 0x00000000
+#define RADEON_CP_VC_CNTL_TCL_ENABLE 0x00000200
+#define RADEON_CP_VC_CNTL_NUM_SHIFT 16
+
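+/* Illustrative sketch, not part of the original header: a CP_VC_CNTL
+ * word drawing num_verts vertices as a TCL-transformed triangle list
+ * walked sequentially from the vertex buffer.  The helper name is
+ * hypothetical. */
+static inline unsigned radeon_vc_cntl_tri_list(unsigned num_verts)
+{
+    return RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST
+         | RADEON_CP_VC_CNTL_PRIM_WALK_LIST
+         | RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA
+         | RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE
+         | RADEON_CP_VC_CNTL_TCL_ENABLE
+         | (num_verts << RADEON_CP_VC_CNTL_NUM_SHIFT);
+}
+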
+#define RADEON_VS_MATRIX_0_ADDR 0
+#define RADEON_VS_MATRIX_1_ADDR 4
+#define RADEON_VS_MATRIX_2_ADDR 8
+#define RADEON_VS_MATRIX_3_ADDR 12
+#define RADEON_VS_MATRIX_4_ADDR 16
+#define RADEON_VS_MATRIX_5_ADDR 20
+#define RADEON_VS_MATRIX_6_ADDR 24
+#define RADEON_VS_MATRIX_7_ADDR 28
+#define RADEON_VS_MATRIX_8_ADDR 32
+#define RADEON_VS_MATRIX_9_ADDR 36
+#define RADEON_VS_MATRIX_10_ADDR 40
+#define RADEON_VS_MATRIX_11_ADDR 44
+#define RADEON_VS_MATRIX_12_ADDR 48
+#define RADEON_VS_MATRIX_13_ADDR 52
+#define RADEON_VS_MATRIX_14_ADDR 56
+#define RADEON_VS_MATRIX_15_ADDR 60
+#define RADEON_VS_LIGHT_AMBIENT_ADDR 64
+#define RADEON_VS_LIGHT_DIFFUSE_ADDR 72
+#define RADEON_VS_LIGHT_SPECULAR_ADDR 80
+#define RADEON_VS_LIGHT_DIRPOS_ADDR 88
+#define RADEON_VS_LIGHT_HWVSPOT_ADDR 96
+#define RADEON_VS_LIGHT_ATTENUATION_ADDR 104
+#define RADEON_VS_MATRIX_EYE2CLIP_ADDR 112
+#define RADEON_VS_UCP_ADDR 116
+#define RADEON_VS_GLOBAL_AMBIENT_ADDR 122
+#define RADEON_VS_FOG_PARAM_ADDR 123
+#define RADEON_VS_EYE_VECTOR_ADDR 124
+
+#define RADEON_SS_LIGHT_DCD_ADDR 0
+#define RADEON_SS_LIGHT_SPOT_EXPONENT_ADDR 8
+#define RADEON_SS_LIGHT_SPOT_CUTOFF_ADDR 16
+#define RADEON_SS_LIGHT_SPECULAR_THRESH_ADDR 24
+#define RADEON_SS_LIGHT_RANGE_CUTOFF_ADDR 32
+#define RADEON_SS_VERT_GUARD_CLIP_ADJ_ADDR 48
+#define RADEON_SS_VERT_GUARD_DISCARD_ADJ_ADDR 49
+#define RADEON_SS_HORZ_GUARD_CLIP_ADJ_ADDR 50
+#define RADEON_SS_HORZ_GUARD_DISCARD_ADJ_ADDR 51
+#define RADEON_SS_SHININESS 60
+
+#define RADEON_TV_MASTER_CNTL 0x0800
+# define RADEON_TVCLK_ALWAYS_ONb (1 << 30)
+#define RADEON_TV_DAC_CNTL 0x088c
+# define RADEON_TV_DAC_CMPOUT (1 << 5)
+#define RADEON_TV_PRE_DAC_MUX_CNTL 0x0888
+# define RADEON_Y_RED_EN (1 << 0)
+# define RADEON_C_GRN_EN (1 << 1)
+# define RADEON_CMP_BLU_EN (1 << 2)
+# define RADEON_RED_MX_FORCE_DAC_DATA (6 << 4)
+# define RADEON_GRN_MX_FORCE_DAC_DATA (6 << 8)
+# define RADEON_BLU_MX_FORCE_DAC_DATA (6 << 12)
+# define RADEON_TV_FORCE_DAC_DATA_SHIFT 16
+#endif