
libdrm 2.4.37
intel_bufmgr_fake.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/* Originally a fake version of the buffer manager, so that we could
 * prototype the changes in a driver fairly quickly; it has since been
 * fleshed out into a fully functional interim solution.
 *
 * Basically wraps the old-style memory management in the new
 * programming interface, but is more expressive and avoids many of
 * the bugs in the old texture manager.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <xf86drm.h>
#include <pthread.h>
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "drm.h"
#include "i915_drm.h"
#include "mm.h"
#include "libdrm_lists.h"

/* Support gcc's __FUNCTION__ for people using other compilers */
#if !defined(__GNUC__) && !defined(__FUNCTION__)
# define __FUNCTION__ __func__ /* C99 */
#endif

#define DBG(...) do {                                   \
       if (bufmgr_fake->bufmgr.debug)                   \
              drmMsg(__VA_ARGS__);                      \
} while (0)

/* Internal flags:
 */
#define BM_NO_BACKING_STORE               0x00000001
#define BM_NO_FENCE_SUBDATA               0x00000002
#define BM_PINNED                         0x00000004

/* Wrapper around mm.c's mem_block, which understands that you must
 * wait for fences to expire before memory can be freed.  This is
 * specific to our use of memcpy for uploads - an upload that was
 * processed through the command queue wouldn't need to care about
 * fences.
 */
#define MAX_RELOCS 4096

struct fake_buffer_reloc {
       /* Buffer object that the relocation points at. */
       drm_intel_bo *target_buf;
       /* Offset of the relocation entry within the relocating buffer. */
       uint32_t offset;
       /* Target offset that was in effect when the entry was last written. */
       uint32_t last_target_offset;
       /* Value added to target_buf's offset to get the relocation entry. */
       uint32_t delta;
       /* Cache domains the target buffer is read into. */
       uint32_t read_domains;
       /* Cache domain the target buffer will have dirty cachelines in. */
       uint32_t write_domain;
};

struct block {
       struct block *next, *prev;
       struct mem_block *mem;      /* BM_MEM_AGP */

       /* Block is on the bufmgr_fake->on_hardware list: validated for the
        * upcoming batch submission but not yet fenced.
        */
       unsigned on_hardware:1;
       /* Block is on the bufmgr_fake->fenced list, waiting for its fence
        * to be retired.
        */
       unsigned fenced:1;

       unsigned fence;             /* Split to read_fence, write_fence */

       drm_intel_bo *bo;
       void *virtual;
};
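
/* Block lifecycle (descriptive summary of the code below): a block is
 * allocated onto the lru list, moves to on_hardware when its buffer is
 * validated for a batch, moves to fenced once a fence cookie is emitted
 * for that batch, and returns to the lru (or is freed, if its buffer was
 * released in the meantime) when clear_fenced() sees the fence retire.
 */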

typedef struct _bufmgr_fake {
       drm_intel_bufmgr bufmgr;

       pthread_mutex_t lock;

       unsigned long low_offset;
       unsigned long size;
       void *virtual;

       struct mem_block *heap;

       unsigned buf_nr;     /* for generating ids */

       /* Blocks validated for the upcoming batch, not yet fenced. */
       struct block on_hardware;
       /* Blocks with an outstanding fence, ordered by fence cookie. */
       struct block fenced;
       /* Idle blocks available for eviction, in LRU order. */
       struct block lru;

       unsigned int last_fence;

       unsigned fail:1;
       unsigned need_fence:1;
       int thrashing;

       /* Driver callbacks that, when set, replace the kernel IRQ
        * emit/wait path for fencing.
        */
       unsigned int (*fence_emit) (void *private);
       void (*fence_wait) (unsigned int fence, void *private);
       void *fence_priv;

       /* Driver callback that, when set, replaces the DRM batchbuffer
        * submission ioctl.
        */
       int (*exec) (drm_intel_bo *bo, unsigned int used, void *priv);
       void *exec_priv;

       void *driver_priv;
       /* Pointer to the breadcrumb the hardware writes as batches retire,
        * e.g. a word in the hardware status page.
        */
       volatile int *last_dispatch;

       int fd;

       int debug;

       int performed_rendering;
} drm_intel_bufmgr_fake;

typedef struct _drm_intel_bo_fake {
       drm_intel_bo bo;

       unsigned id;         /* debug only */
       const char *name;

       /* Backing store is newer than the card's copy; upload on validate. */
       unsigned dirty:1;
       /* The card's copy is newer than the backing store; copy back before
        * freeing or mapping for read.
        */
       unsigned card_dirty:1;
       unsigned int refcount;
       /* Flags may consist of any of the DRM_BO flags, plus
        * BM_NO_BACKING_STORE and BM_NO_FENCE_SUBDATA, which are the
        * first two driver private flags.
        */
       uint64_t flags;
       uint32_t read_domains;
       uint32_t write_domain;

       unsigned int alignment;
       int is_static, validated;
       unsigned int map_count;

       struct fake_buffer_reloc *relocs;
       int nr_relocs;
       /* Combined size of the non-static relocation targets, used when
        * checking aperture space.
        */
       unsigned int child_size;

       struct block *block;
       void *backing_store;
       void (*invalidate_cb) (drm_intel_bo *bo, void *ptr);
       void *invalidate_ptr;
} drm_intel_bo_fake;

static int clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake,
                     unsigned int fence_cookie);

#define MAXFENCE 0x7fffffff

static int
FENCE_LTE(unsigned a, unsigned b)
{
       if (a == b)
              return 1;

       if (a < b && b - a < (1 << 24))
              return 1;

       if (a > b && MAXFENCE - a + b < (1 << 24))
              return 1;

       return 0;
}
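
/* Worked example (illustrative): FENCE_LTE treats fence cookies as points
 * on a circle of circumference MAXFENCE with a 2^24 comparison window.
 * FENCE_LTE(3, 8) == 1 since 8 - 3 < 1 << 24, and
 * FENCE_LTE(0x7ffffff0, 0x10) == 1 because the wrap-around distance
 * MAXFENCE - 0x7ffffff0 + 0x10 = 0x1f is also inside the window.  Cookies
 * further apart than 2^24 compare as "not yet passed".
 */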

void
drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
                                    unsigned int (*emit) (void *priv),
                                    void (*wait) (unsigned int fence,
                                                 void *priv),
                                    void *priv)
{
       drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

       bufmgr_fake->fence_emit = emit;
       bufmgr_fake->fence_wait = wait;
       bufmgr_fake->fence_priv = priv;
}

static unsigned int
_fence_emit_internal(drm_intel_bufmgr_fake *bufmgr_fake)
{
       struct drm_i915_irq_emit ie;
       int ret, seq = 1;

       if (bufmgr_fake->fence_emit != NULL) {
              seq = bufmgr_fake->fence_emit(bufmgr_fake->fence_priv);
              return seq;
       }

       ie.irq_seq = &seq;
       ret = drmCommandWriteRead(bufmgr_fake->fd, DRM_I915_IRQ_EMIT,
                              &ie, sizeof(ie));
       if (ret) {
              drmMsg("%s: drm_i915_irq_emit: %d\n", __FUNCTION__, ret);
              abort();
       }

       DBG("emit 0x%08x\n", seq);
       return seq;
}

static void
_fence_wait_internal(drm_intel_bufmgr_fake *bufmgr_fake, int seq)
{
       struct drm_i915_irq_wait iw;
       int hw_seq, busy_count = 0;
       int ret;
       int kernel_lied;

       if (bufmgr_fake->fence_wait != NULL) {
              bufmgr_fake->fence_wait(seq, bufmgr_fake->fence_priv);
              clear_fenced(bufmgr_fake, seq);
              return;
       }

       iw.irq_seq = seq;

       DBG("wait 0x%08x\n", iw.irq_seq);

       /* The kernel IRQ_WAIT implementation is all sorts of broken.
        * 1) It returns 1 to 0x7fffffff instead of using the full 32-bit
        *    unsigned range.
        * 2) It returns 0 if hw_seq >= seq, not seq - hw_seq < 0 on the 32-bit
        *    signed range.
        * 3) It waits if seq < hw_seq, not seq - hw_seq > 0 on the 32-bit
        *    signed range.
        * 4) It returns -EBUSY in 3 seconds even if the hardware is still
        *    successfully chewing through buffers.
        *
        * Assume that in userland we treat sequence numbers as ints, which
        * makes some of the comparisons convenient, since the sequence
        * numbers are all positive signed integers.
        *
        * From this we get several cases we need to handle.  Here's a timeline.
        * 0x2   0x7                                    0x7ffffff8   0x7ffffffd
        *   |    |                                             |    |
        * ------------------------------------------------------------
        *
        * A) Normal wait for hw to catch up
        * hw_seq seq
        *   |    |
        * ------------------------------------------------------------
        * seq - hw_seq = 5.  If we call IRQ_WAIT, it will wait for hw to
        * catch up.
        *
        * B) Normal wait for a sequence number that's already passed.
        * seq    hw_seq
        *   |    |
        * ------------------------------------------------------------
        * seq - hw_seq = -5.  If we call IRQ_WAIT, it returns 0 quickly.
        *
        * C) Hardware has already wrapped around ahead of us
        * hw_seq                                                    seq
        *   |                                                       |
        * ------------------------------------------------------------
        * seq - hw_seq = 0x80000000 - 5.  If we called IRQ_WAIT, it would wait
        * for hw_seq >= seq, which may never occur.  Thus, we want to catch
        * this in userland and return 0.
        *
        * D) We've wrapped around ahead of the hardware.
        * seq                                                      hw_seq
        *   |                                                       |
        * ------------------------------------------------------------
        * seq - hw_seq = -(0x80000000 - 5).  If we called IRQ_WAIT, it would
        * return 0 quickly because hw_seq >= seq, even though the hardware
        * isn't caught up. Thus, we need to catch this early return in
        * userland and bother the kernel until the hardware really does
        * catch up.
        *
        * E) Hardware might wrap after we test in userland.
        *                                                  hw_seq  seq
        *                                                      |    |
        * ------------------------------------------------------------
        * seq - hw_seq = 5.  If we call IRQ_WAIT, it will likely see seq >=
        * hw_seq and wait.  However, suppose hw_seq wraps before we make it
        * into the kernel.  The kernel sees hw_seq >= seq and waits for 3
        * seconds then returns -EBUSY.  This is case C).  We should catch
        * this and then return successfully.
        *
        * F) Hardware might take a long time on a buffer.
        * hw_seq seq
        *   |    |
        * -------------------------------------------------------------------
        * seq - hw_seq = 5.  If we call IRQ_WAIT, if sequence 2 through 5
        * take too long, it will return -EBUSY.  Batchbuffers in the
        * gltestperf demo were seen to take up to 7 seconds.  We should
        * catch early -EBUSY return and keep trying.
        */

       do {
              /* Keep a copy of last_dispatch so that if the wait -EBUSYs
               * because the hardware didn't catch up in 3 seconds, we can
               * see if it at least made progress and retry.
               */
              hw_seq = *bufmgr_fake->last_dispatch;

              /* Catch case C */
              if (seq - hw_seq > 0x40000000)
                     return;

              ret = drmCommandWrite(bufmgr_fake->fd, DRM_I915_IRQ_WAIT,
                                  &iw, sizeof(iw));
              /* Catch case D */
              kernel_lied = (ret == 0) && (seq - *bufmgr_fake->last_dispatch <
                                        -0x40000000);

              /* Catch case E */
              if (ret == -EBUSY
                  && (seq - *bufmgr_fake->last_dispatch > 0x40000000))
                     ret = 0;

              /* Catch case F: Allow up to 15 seconds chewing on one buffer. */
              if ((ret == -EBUSY) && (hw_seq != *bufmgr_fake->last_dispatch))
                     busy_count = 0;
              else
                     busy_count++;
       } while (kernel_lied || ret == -EAGAIN || ret == -EINTR ||
               (ret == -EBUSY && busy_count < 5));

       if (ret != 0) {
              drmMsg("%s:%d: Error waiting for fence: %s.\n", __FILE__,
                     __LINE__, strerror(-ret));
              abort();
       }
       clear_fenced(bufmgr_fake, seq);
}

static int
_fence_test(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
{
       /* Slight problem with wrap-around:
        */
       return fence == 0 || FENCE_LTE(fence, bufmgr_fake->last_fence);
}

/* Allocate a memory manager block for the buffer.
 */
static int
alloc_block(drm_intel_bo *bo)
{
       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
       drm_intel_bufmgr_fake *bufmgr_fake =
           (drm_intel_bufmgr_fake *) bo->bufmgr;
       struct block *block = (struct block *)calloc(sizeof *block, 1);
       unsigned int align_log2 = ffs(bo_fake->alignment) - 1;
       unsigned int sz;

       if (!block)
              return 0;     /* failure: callers treat nonzero as success */

       sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1);

       block->mem = mmAllocMem(bufmgr_fake->heap, sz, align_log2, 0);
       if (!block->mem) {
              free(block);
              return 0;
       }

       DRMINITLISTHEAD(block);

       /* Insert at head or at tail??? */
       DRMLISTADDTAIL(block, &bufmgr_fake->lru);

       block->virtual = (uint8_t *) bufmgr_fake->virtual +
           block->mem->ofs - bufmgr_fake->low_offset;
       block->bo = bo;

       bo_fake->block = block;

       return 1;
}
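
/* Sizing example (illustrative): for bo->size = 5000 and alignment = 4096,
 * sz rounds up to 8192 and align_log2 = ffs(4096) - 1 = 12, so mmAllocMem
 * places the block on a 4096-byte boundary within the managed aperture.
 */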

/* Release the card storage associated with buf:
 */
static void
free_block(drm_intel_bufmgr_fake *bufmgr_fake, struct block *block,
          int skip_dirty_copy)
{
       drm_intel_bo_fake *bo_fake;

       if (!block)
              return;

       DBG("free block %p %08x %d %d\n", block, block->mem->ofs,
           block->on_hardware, block->fenced);

       bo_fake = (drm_intel_bo_fake *) block->bo;

       if (bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE))
              skip_dirty_copy = 1;

       if (!skip_dirty_copy && (bo_fake->card_dirty == 1)) {
              memcpy(bo_fake->backing_store, block->virtual, block->bo->size);
              bo_fake->card_dirty = 0;
              bo_fake->dirty = 1;
       }

       if (block->on_hardware) {
              block->bo = NULL;
       } else if (block->fenced) {
              block->bo = NULL;
       } else {
              DBG("    - free immediately\n");
              DRMLISTDEL(block);

              mmFreeMem(block->mem);
              free(block);
       }
}

static void
alloc_backing_store(drm_intel_bo *bo)
{
       drm_intel_bufmgr_fake *bufmgr_fake =
           (drm_intel_bufmgr_fake *) bo->bufmgr;
       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
       assert(!bo_fake->backing_store);
       assert(!(bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE)));

       bo_fake->backing_store = malloc(bo->size);

       DBG("alloc_backing - buf %d %p %d\n", bo_fake->id,
           bo_fake->backing_store, bo->size);
       assert(bo_fake->backing_store);
}

static void
free_backing_store(drm_intel_bo *bo)
{
       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

       if (bo_fake->backing_store) {
              assert(!(bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE)));
              free(bo_fake->backing_store);
              bo_fake->backing_store = NULL;
       }
}

static void
set_dirty(drm_intel_bo *bo)
{
       drm_intel_bufmgr_fake *bufmgr_fake =
           (drm_intel_bufmgr_fake *) bo->bufmgr;
       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

       if (bo_fake->flags & BM_NO_BACKING_STORE
           && bo_fake->invalidate_cb != NULL)
              bo_fake->invalidate_cb(bo, bo_fake->invalidate_ptr);

       assert(!(bo_fake->flags & BM_PINNED));

       DBG("set_dirty - buf %d\n", bo_fake->id);
       bo_fake->dirty = 1;
}

static int
evict_lru(drm_intel_bufmgr_fake *bufmgr_fake, unsigned int max_fence)
{
       struct block *block, *tmp;

       DBG("%s\n", __FUNCTION__);

       DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
              drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;

              if (bo_fake != NULL && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
                     continue;

              if (block->fence && max_fence && !FENCE_LTE(block->fence,
                                                     max_fence))
                     return 0;

              set_dirty(&bo_fake->bo);
              bo_fake->block = NULL;

              free_block(bufmgr_fake, block, 0);
              return 1;
       }

       return 0;
}

static int
evict_mru(drm_intel_bufmgr_fake *bufmgr_fake)
{
       struct block *block, *tmp;

       DBG("%s\n", __FUNCTION__);

       DRMLISTFOREACHSAFEREVERSE(block, tmp, &bufmgr_fake->lru) {
              drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;

              if (bo_fake && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
                     continue;

              set_dirty(&bo_fake->bo);
              bo_fake->block = NULL;

              free_block(bufmgr_fake, block, 0);
              return 1;
       }

       return 0;
}

/* Walk the fenced list, returning blocks whose fence has passed to the
 * LRU (or freeing delayed-free blocks); returns nonzero if any progress
 * was made.
 */
static int
clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake, unsigned int fence_cookie)
{
       struct block *block, *tmp;
       int ret = 0;

       bufmgr_fake->last_fence = fence_cookie;
       DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->fenced) {
              assert(block->fenced);

              if (_fence_test(bufmgr_fake, block->fence)) {

                     block->fenced = 0;

                     if (!block->bo) {
                            DBG("delayed free: offset %x sz %x\n",
                                block->mem->ofs, block->mem->size);
                            DRMLISTDEL(block);
                            mmFreeMem(block->mem);
                            free(block);
                     } else {
                            DBG("return to lru: offset %x sz %x\n",
                                block->mem->ofs, block->mem->size);
                            DRMLISTDEL(block);
                            DRMLISTADDTAIL(block, &bufmgr_fake->lru);
                     }

                     ret = 1;
              } else {
                     /* Blocks are ordered by fence, so if one fails, all
                      * from here will fail also:
                      */
                     DBG("fence not passed: offset %x sz %x %d %d\n",
                         block->mem->ofs, block->mem->size, block->fence,
                         bufmgr_fake->last_fence);
                     break;
              }
       }

       DBG("%s: %d\n", __FUNCTION__, ret);
       return ret;
}

static void
fence_blocks(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
{
       struct block *block, *tmp;

       DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
              DBG("Fence block %p (sz 0x%x ofs %x buf %p) with fence %d\n",
                  block, block->mem->size, block->mem->ofs, block->bo, fence);
              block->fence = fence;

              block->on_hardware = 0;
              block->fenced = 1;

              /* Move to tail of pending list here
               */
              DRMLISTDEL(block);
              DRMLISTADDTAIL(block, &bufmgr_fake->fenced);
       }

       assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
}

static int
evict_and_alloc_block(drm_intel_bo *bo)
{
       drm_intel_bufmgr_fake *bufmgr_fake =
           (drm_intel_bufmgr_fake *) bo->bufmgr;
       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

       assert(bo_fake->block == NULL);

       /* Search for already free memory:
        */
       if (alloc_block(bo))
              return 1;

       /* If we're not thrashing, allow lru eviction to dig deeper into
        * recently used textures.  We'll probably be thrashing soon:
        */
       if (!bufmgr_fake->thrashing) {
              while (evict_lru(bufmgr_fake, 0))
                     if (alloc_block(bo))
                            return 1;
       }

       /* Keep thrashing counter alive?
        */
       if (bufmgr_fake->thrashing)
              bufmgr_fake->thrashing = 20;

       /* Wait on any already pending fences - here we are waiting for any
        * freed memory that has been submitted to hardware and fenced to
        * become available:
        */
       while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
              uint32_t fence = bufmgr_fake->fenced.next->fence;
              _fence_wait_internal(bufmgr_fake, fence);

              if (alloc_block(bo))
                     return 1;
       }

       if (!DRMLISTEMPTY(&bufmgr_fake->on_hardware)) {
              while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
                     uint32_t fence = bufmgr_fake->fenced.next->fence;
                     _fence_wait_internal(bufmgr_fake, fence);
              }

              if (!bufmgr_fake->thrashing) {
                     DBG("thrashing\n");
              }
              bufmgr_fake->thrashing = 20;

              if (alloc_block(bo))
                     return 1;
       }

       while (evict_mru(bufmgr_fake))
              if (alloc_block(bo))
                     return 1;

       DBG("%s 0x%x bytes failed\n", __FUNCTION__, bo->size);

       return 0;
}

/***********************************************************************
 * Public functions
 */

/* Wait for hardware idle by emitting a fence and waiting for it.
 */
static void
drm_intel_bufmgr_fake_wait_idle(drm_intel_bufmgr_fake *bufmgr_fake)
{
       unsigned int cookie;

       cookie = _fence_emit_internal(bufmgr_fake);
       _fence_wait_internal(bufmgr_fake, cookie);
}

/* Wait for rendering to a buffer to complete.
 *
 * It is assumed that the batchbuffer which performed the rendering included
 * the necessary flushing.
 */
static void
drm_intel_fake_bo_wait_rendering_locked(drm_intel_bo *bo)
{
       drm_intel_bufmgr_fake *bufmgr_fake =
           (drm_intel_bufmgr_fake *) bo->bufmgr;
       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

       if (bo_fake->block == NULL || !bo_fake->block->fenced)
              return;

       _fence_wait_internal(bufmgr_fake, bo_fake->block->fence);
}

static void
drm_intel_fake_bo_wait_rendering(drm_intel_bo *bo)
{
       drm_intel_bufmgr_fake *bufmgr_fake =
           (drm_intel_bufmgr_fake *) bo->bufmgr;

       pthread_mutex_lock(&bufmgr_fake->lock);
       drm_intel_fake_bo_wait_rendering_locked(bo);
       pthread_mutex_unlock(&bufmgr_fake->lock);
}

/* Specifically ignore texture memory sharing.
 *  -- just evict everything
 *  -- and wait for idle
 */
void
drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr)
{
       drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
       struct block *block, *tmp;

       pthread_mutex_lock(&bufmgr_fake->lock);

       bufmgr_fake->need_fence = 1;
       bufmgr_fake->fail = 0;

       /* Wait for hardware idle.  We don't know where acceleration has been
        * happening, so we'll need to wait anyway before letting anything get
        * put on the card again.
        */
       drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);

       /* Check that we hadn't released the lock without having fenced the last
        * set of buffers.
        */
       assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
       assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));

       DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
              assert(_fence_test(bufmgr_fake, block->fence));
              set_dirty(block->bo);
       }

       pthread_mutex_unlock(&bufmgr_fake->lock);
}

static drm_intel_bo *
drm_intel_fake_bo_alloc(drm_intel_bufmgr *bufmgr,
                     const char *name,
                     unsigned long size,
                     unsigned int alignment)
{
       drm_intel_bufmgr_fake *bufmgr_fake;
       drm_intel_bo_fake *bo_fake;

       bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

       assert(size != 0);

       bo_fake = calloc(1, sizeof(*bo_fake));
       if (!bo_fake)
              return NULL;

       bo_fake->bo.size = size;
       bo_fake->bo.offset = -1;
       bo_fake->bo.virtual = NULL;
       bo_fake->bo.bufmgr = bufmgr;
       bo_fake->refcount = 1;

       /* Alignment must be a power of two */
       assert((alignment & (alignment - 1)) == 0);
       if (alignment == 0)
              alignment = 1;
       bo_fake->alignment = alignment;
       bo_fake->id = ++bufmgr_fake->buf_nr;
       bo_fake->name = name;
       bo_fake->flags = 0;
       bo_fake->is_static = 0;

       DBG("drm_bo_alloc: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
           bo_fake->bo.size / 1024);

       return &bo_fake->bo;
}
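
/* Usage sketch (illustrative, via the generic drm_intel_bo API that
 * dispatches to the callbacks installed in drm_intel_bufmgr_fake_init
 * below; data and data_size stand in for caller-provided values):
 *
 *   drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "vertices", 4096, 4096);
 *   if (drm_intel_bo_map(bo, 1) == 0) {
 *          memcpy(bo->virtual, data, data_size);
 *          drm_intel_bo_unmap(bo);
 *   }
 *   drm_intel_bo_unreference(bo);
 */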

static drm_intel_bo *
drm_intel_fake_bo_alloc_tiled(drm_intel_bufmgr * bufmgr,
                           const char *name,
                           int x, int y, int cpp,
                           uint32_t *tiling_mode,
                           unsigned long *pitch,
                           unsigned long flags)
{
       unsigned long stride, aligned_y;

       /* No runtime tiling support for fake. */
       *tiling_mode = I915_TILING_NONE;

       /* Align it for being a render target.  Shouldn't need anything else. */
       stride = x * cpp;
       stride = ROUND_UP_TO(stride, 64);

       /* 965 subspan loading alignment */
       aligned_y = ALIGN(y, 2);

       *pitch = stride;

       return drm_intel_fake_bo_alloc(bufmgr, name, stride * aligned_y,
                                   4096);
}

drm_intel_bo *
drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr,
                            const char *name,
                            unsigned long offset,
                            unsigned long size, void *virtual)
{
       drm_intel_bufmgr_fake *bufmgr_fake;
       drm_intel_bo_fake *bo_fake;

       bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

       assert(size != 0);

       bo_fake = calloc(1, sizeof(*bo_fake));
       if (!bo_fake)
              return NULL;

       bo_fake->bo.size = size;
       bo_fake->bo.offset = offset;
       bo_fake->bo.virtual = virtual;
       bo_fake->bo.bufmgr = bufmgr;
       bo_fake->refcount = 1;
       bo_fake->id = ++bufmgr_fake->buf_nr;
       bo_fake->name = name;
       bo_fake->flags = BM_PINNED;
       bo_fake->is_static = 1;

       DBG("drm_bo_alloc_static: (buf %d: %s, %d kb)\n", bo_fake->id,
           bo_fake->name, bo_fake->bo.size / 1024);

       return &bo_fake->bo;
}
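
/* Example (illustrative): a driver would typically wrap a pinned region it
 * already knows about, such as a fixed front buffer, so relocations can
 * point at it:
 *
 *   drm_intel_bo *front =
 *          drm_intel_bo_fake_alloc_static(bufmgr, "front", front_offset,
 *                                      front_size, front_virtual);
 *
 * front_offset, front_size and front_virtual are hypothetical names for
 * the pre-existing mapping's parameters.
 */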

static void
drm_intel_fake_bo_reference(drm_intel_bo *bo)
{
       drm_intel_bufmgr_fake *bufmgr_fake =
           (drm_intel_bufmgr_fake *) bo->bufmgr;
       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

       pthread_mutex_lock(&bufmgr_fake->lock);
       bo_fake->refcount++;
       pthread_mutex_unlock(&bufmgr_fake->lock);
}

static void
drm_intel_fake_bo_reference_locked(drm_intel_bo *bo)
{
       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

       bo_fake->refcount++;
}

static void
drm_intel_fake_bo_unreference_locked(drm_intel_bo *bo)
{
       drm_intel_bufmgr_fake *bufmgr_fake =
           (drm_intel_bufmgr_fake *) bo->bufmgr;
       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
       int i;

       if (--bo_fake->refcount == 0) {
              assert(bo_fake->map_count == 0);
              /* No remaining references, so free it */
              if (bo_fake->block)
                     free_block(bufmgr_fake, bo_fake->block, 1);
              free_backing_store(bo);

              for (i = 0; i < bo_fake->nr_relocs; i++)
                     drm_intel_fake_bo_unreference_locked(bo_fake->relocs[i].
                                                      target_buf);

              DBG("drm_bo_unreference: free buf %d %s\n", bo_fake->id,
                  bo_fake->name);

              free(bo_fake->relocs);
              free(bo);
       }
}

static void
drm_intel_fake_bo_unreference(drm_intel_bo *bo)
{
       drm_intel_bufmgr_fake *bufmgr_fake =
           (drm_intel_bufmgr_fake *) bo->bufmgr;

       pthread_mutex_lock(&bufmgr_fake->lock);
       drm_intel_fake_bo_unreference_locked(bo);
       pthread_mutex_unlock(&bufmgr_fake->lock);
}

/* Set the buffer as not requiring backing store; instead, the invalidate
 * callback is invoked whenever the buffer contents would be lost.
 */
void
drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
                                   void (*invalidate_cb) (drm_intel_bo *bo,
                                                        void *ptr),
                                   void *ptr)
{
       drm_intel_bufmgr_fake *bufmgr_fake =
           (drm_intel_bufmgr_fake *) bo->bufmgr;
       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

       pthread_mutex_lock(&bufmgr_fake->lock);

       if (bo_fake->backing_store)
              free_backing_store(bo);

       bo_fake->flags |= BM_NO_BACKING_STORE;

       DBG("disable_backing_store set buf %d dirty\n", bo_fake->id);
       bo_fake->dirty = 1;
       bo_fake->invalidate_cb = invalidate_cb;
       bo_fake->invalidate_ptr = ptr;

       /* Note that it is invalid right from the start.  Also note
        * invalidate_cb is called with the bufmgr locked, so cannot
        * itself make bufmgr calls.
        */
       if (invalidate_cb != NULL)
              invalidate_cb(bo, ptr);

       pthread_mutex_unlock(&bufmgr_fake->lock);
}

/* Map a buffer into bo->virtual, allocating either card memory space (if
 * BM_NO_BACKING_STORE or BM_PINNED) or backing store, as necessary.
 */
static int
drm_intel_fake_bo_map_locked(drm_intel_bo *bo, int write_enable)
{
       drm_intel_bufmgr_fake *bufmgr_fake =
           (drm_intel_bufmgr_fake *) bo->bufmgr;
       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

       /* Static buffers are always mapped. */
       if (bo_fake->is_static) {
              if (bo_fake->card_dirty) {
                     drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
                     bo_fake->card_dirty = 0;
              }
              return 0;
       }

       /* Allow recursive mapping.  Mesa may recursively map buffers with
        * nested display loops, and it is used internally in bufmgr_fake
        * for relocation.
        */
       if (bo_fake->map_count++ != 0)
              return 0;

       {
              DBG("drm_bo_map: (buf %d: %s, %d kb)\n", bo_fake->id,
                  bo_fake->name, bo_fake->bo.size / 1024);

              if (bo->virtual != NULL) {
                     drmMsg("%s: already mapped\n", __FUNCTION__);
                     abort();
              } else if (bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED)) {

                     if (!bo_fake->block && !evict_and_alloc_block(bo)) {
                            DBG("%s: alloc failed\n", __FUNCTION__);
                            bufmgr_fake->fail = 1;
                            return 1;
                     } else {
                            assert(bo_fake->block);
                            bo_fake->dirty = 0;

                            if (!(bo_fake->flags & BM_NO_FENCE_SUBDATA) &&
                                bo_fake->block->fenced) {
                                   drm_intel_fake_bo_wait_rendering_locked
                                       (bo);
                            }

                            bo->virtual = bo_fake->block->virtual;
                     }
              } else {
                     if (write_enable)
                            set_dirty(bo);

                     if (bo_fake->backing_store == 0)
                            alloc_backing_store(bo);

                     if ((bo_fake->card_dirty == 1) && bo_fake->block) {
                            if (bo_fake->block->fenced)
                                   drm_intel_fake_bo_wait_rendering_locked
                                       (bo);

                            memcpy(bo_fake->backing_store,
                                   bo_fake->block->virtual,
                                   bo_fake->block->bo->size);
                            bo_fake->card_dirty = 0;
                     }

                     bo->virtual = bo_fake->backing_store;
              }
       }

       return 0;
}

static int
drm_intel_fake_bo_map(drm_intel_bo *bo, int write_enable)
{
       drm_intel_bufmgr_fake *bufmgr_fake =
           (drm_intel_bufmgr_fake *) bo->bufmgr;
       int ret;

       pthread_mutex_lock(&bufmgr_fake->lock);
       ret = drm_intel_fake_bo_map_locked(bo, write_enable);
       pthread_mutex_unlock(&bufmgr_fake->lock);

       return ret;
}

static int
drm_intel_fake_bo_unmap_locked(drm_intel_bo *bo)
{
       drm_intel_bufmgr_fake *bufmgr_fake =
           (drm_intel_bufmgr_fake *) bo->bufmgr;
       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

       /* Static buffers are always mapped. */
       if (bo_fake->is_static)
              return 0;

       assert(bo_fake->map_count != 0);
       if (--bo_fake->map_count != 0)
              return 0;

       DBG("drm_bo_unmap: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
           bo_fake->bo.size / 1024);

       bo->virtual = NULL;

       return 0;
}

static int drm_intel_fake_bo_unmap(drm_intel_bo *bo)
{
       drm_intel_bufmgr_fake *bufmgr_fake =
           (drm_intel_bufmgr_fake *) bo->bufmgr;
       int ret;

       pthread_mutex_lock(&bufmgr_fake->lock);
       ret = drm_intel_fake_bo_unmap_locked(bo);
       pthread_mutex_unlock(&bufmgr_fake->lock);

       return ret;
}

static int
drm_intel_fake_bo_subdata(drm_intel_bo *bo, unsigned long offset,
                       unsigned long size, const void *data)
{
       int ret;

       if (size == 0 || data == NULL)
              return 0;

       ret = drm_intel_bo_map(bo, 1);
       if (ret)
              return ret;
       memcpy((unsigned char *)bo->virtual + offset, data, size);
       drm_intel_bo_unmap(bo);
       return 0;
}

static void
drm_intel_fake_kick_all_locked(drm_intel_bufmgr_fake *bufmgr_fake)
{
       struct block *block, *tmp;

       bufmgr_fake->performed_rendering = 0;
       /* Okay, for every BO that is on the HW, kick it off.
        * Seriously not afraid of the POLICE right now.
        */
       DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
              drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;

              block->on_hardware = 0;
              free_block(bufmgr_fake, block, 0);
              bo_fake->block = NULL;
              bo_fake->validated = 0;
              if (!(bo_fake->flags & BM_NO_BACKING_STORE))
                     bo_fake->dirty = 1;
       }
}

static int
drm_intel_fake_bo_validate(drm_intel_bo *bo)
{
       drm_intel_bufmgr_fake *bufmgr_fake;
       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

       bufmgr_fake = (drm_intel_bufmgr_fake *) bo->bufmgr;

       DBG("drm_bo_validate: (buf %d: %s, %d kb)\n", bo_fake->id,
           bo_fake->name, bo_fake->bo.size / 1024);

       /* Sanity check: Buffers should be unmapped before being validated.
        * This is not so much of a problem for bufmgr_fake, but TTM refuses,
        * and the problem is harder to debug there.
        */
       assert(bo_fake->map_count == 0);

       if (bo_fake->is_static) {
              /* Add it to the needs-fence list */
              bufmgr_fake->need_fence = 1;
              return 0;
       }

       /* Allocate the card memory */
       if (!bo_fake->block && !evict_and_alloc_block(bo)) {
              bufmgr_fake->fail = 1;
              DBG("Failed to validate buf %d:%s\n", bo_fake->id,
                  bo_fake->name);
              return -1;
       }

       assert(bo_fake->block);
       assert(bo_fake->block->bo == &bo_fake->bo);

       bo->offset = bo_fake->block->mem->ofs;

       /* Upload the buffer contents if necessary */
       if (bo_fake->dirty) {
              DBG("Upload dirty buf %d:%s, sz %d offset 0x%x\n", bo_fake->id,
                  bo_fake->name, bo->size, bo_fake->block->mem->ofs);

              assert(!(bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED)));

              /* Actually, we should be able to just wait for a fence on the
               * memory, which we would be tracking when we free it.  Waiting
               * for idle is a sufficiently large hammer for now.
               */
              drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);

              /* We may never have mapped this BO, so it might not have any
               * backing store; that should be rare, but zero the card
               * memory in any case.
               */
              if (bo_fake->backing_store)
                     memcpy(bo_fake->block->virtual, bo_fake->backing_store,
                            bo->size);
              else
                     memset(bo_fake->block->virtual, 0, bo->size);

              bo_fake->dirty = 0;
       }

       bo_fake->block->fenced = 0;
       bo_fake->block->on_hardware = 1;
       DRMLISTDEL(bo_fake->block);
       DRMLISTADDTAIL(bo_fake->block, &bufmgr_fake->on_hardware);

       bo_fake->validated = 1;
       bufmgr_fake->need_fence = 1;

       return 0;
}

static void
drm_intel_fake_fence_validated(drm_intel_bufmgr *bufmgr)
{
       drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
       unsigned int cookie;

       cookie = _fence_emit_internal(bufmgr_fake);
       fence_blocks(bufmgr_fake, cookie);

       DBG("drm_fence_validated: 0x%08x cookie\n", cookie);
}

static void
drm_intel_fake_destroy(drm_intel_bufmgr *bufmgr)
{
       drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

       pthread_mutex_destroy(&bufmgr_fake->lock);
       mmDestroy(bufmgr_fake->heap);
       free(bufmgr);
}

static int
drm_intel_fake_emit_reloc(drm_intel_bo *bo, uint32_t offset,
                       drm_intel_bo *target_bo, uint32_t target_offset,
                       uint32_t read_domains, uint32_t write_domain)
{
       drm_intel_bufmgr_fake *bufmgr_fake =
           (drm_intel_bufmgr_fake *) bo->bufmgr;
       struct fake_buffer_reloc *r;
       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
       drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *) target_bo;
       int i;

       pthread_mutex_lock(&bufmgr_fake->lock);

       assert(bo);
       assert(target_bo);

       if (bo_fake->relocs == NULL) {
              bo_fake->relocs =
                  malloc(sizeof(struct fake_buffer_reloc) * MAX_RELOCS);
       }

       r = &bo_fake->relocs[bo_fake->nr_relocs++];

       assert(bo_fake->nr_relocs <= MAX_RELOCS);

       drm_intel_fake_bo_reference_locked(target_bo);

       if (!target_fake->is_static) {
              bo_fake->child_size +=
                  ALIGN(target_bo->size, target_fake->alignment);
              bo_fake->child_size += target_fake->child_size;
       }
       r->target_buf = target_bo;
       r->offset = offset;
       r->last_target_offset = target_bo->offset;
       r->delta = target_offset;
       r->read_domains = read_domains;
       r->write_domain = write_domain;

       if (bufmgr_fake->debug) {
              /* Check that a conflicting relocation hasn't already been
               * emitted.
               */
              for (i = 0; i < bo_fake->nr_relocs - 1; i++) {
                     struct fake_buffer_reloc *r2 = &bo_fake->relocs[i];

                     assert(r->offset != r2->offset);
              }
       }

       pthread_mutex_unlock(&bufmgr_fake->lock);

       return 0;
}
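
/* Relocation example (illustrative): to make the dword at byte 64 of a
 * batchbuffer point 256 bytes into target_bo, a driver would emit
 *
 *   drm_intel_bo_emit_reloc(batch_bo, 64, target_bo, 256,
 *                         I915_GEM_DOMAIN_RENDER, 0);
 *
 * and at validation time that dword is rewritten to
 * target_bo->offset + 256 whenever the target has moved.
 */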

/* Incorporate the read/write domains requested by each relocation into
 * the combined domains of its target buffers for this submission.
 */
static void
drm_intel_fake_calculate_domains(drm_intel_bo *bo)
{
       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
       int i;

       for (i = 0; i < bo_fake->nr_relocs; i++) {
              struct fake_buffer_reloc *r = &bo_fake->relocs[i];
              drm_intel_bo_fake *target_fake =
                  (drm_intel_bo_fake *) r->target_buf;

              /* Do the same for the tree of buffers we depend on */
              drm_intel_fake_calculate_domains(r->target_buf);

              target_fake->read_domains |= r->read_domains;
              target_fake->write_domain |= r->write_domain;
       }
}

static int
drm_intel_fake_reloc_and_validate_buffer(drm_intel_bo *bo)
{
       drm_intel_bufmgr_fake *bufmgr_fake =
           (drm_intel_bufmgr_fake *) bo->bufmgr;
       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
       int i, ret;

       assert(bo_fake->map_count == 0);

       for (i = 0; i < bo_fake->nr_relocs; i++) {
              struct fake_buffer_reloc *r = &bo_fake->relocs[i];
              drm_intel_bo_fake *target_fake =
                  (drm_intel_bo_fake *) r->target_buf;
              uint32_t reloc_data;

              /* Validate the target buffer if that hasn't been done. */
              if (!target_fake->validated) {
                     ret =
                         drm_intel_fake_reloc_and_validate_buffer(r->target_buf);
                     if (ret != 0) {
                            if (bo->virtual != NULL)
                                   drm_intel_fake_bo_unmap_locked(bo);
                            return ret;
                     }
              }

              /* Calculate the value of the relocation entry. */
              if (r->target_buf->offset != r->last_target_offset) {
                     reloc_data = r->target_buf->offset + r->delta;

                     if (bo->virtual == NULL)
                            drm_intel_fake_bo_map_locked(bo, 1);

                     *(uint32_t *) ((uint8_t *) bo->virtual + r->offset) =
                         reloc_data;

                     r->last_target_offset = r->target_buf->offset;
              }
       }

       if (bo->virtual != NULL)
              drm_intel_fake_bo_unmap_locked(bo);

       if (bo_fake->write_domain != 0) {
              if (!(bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED))) {
                     if (bo_fake->backing_store == 0)
                            alloc_backing_store(bo);
              }
              bo_fake->card_dirty = 1;
              bufmgr_fake->performed_rendering = 1;
       }

       return drm_intel_fake_bo_validate(bo);
}

static void
drm_intel_bo_fake_post_submit(drm_intel_bo *bo)
{
       drm_intel_bufmgr_fake *bufmgr_fake =
           (drm_intel_bufmgr_fake *) bo->bufmgr;
       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
       int i;

       for (i = 0; i < bo_fake->nr_relocs; i++) {
              struct fake_buffer_reloc *r = &bo_fake->relocs[i];
              drm_intel_bo_fake *target_fake =
                  (drm_intel_bo_fake *) r->target_buf;

              if (target_fake->validated)
                     drm_intel_bo_fake_post_submit(r->target_buf);

              DBG("%s@0x%08x + 0x%08x -> %s@0x%08x + 0x%08x\n",
                  bo_fake->name, (uint32_t) bo->offset, r->offset,
                  target_fake->name, (uint32_t) r->target_buf->offset,
                  r->delta);
       }

       assert(bo_fake->map_count == 0);
       bo_fake->validated = 0;
       bo_fake->read_domains = 0;
       bo_fake->write_domain = 0;
}

void
drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
                                        int (*exec) (drm_intel_bo *bo,
                                                   unsigned int used,
                                                   void *priv),
                                        void *priv)
{
       drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

       bufmgr_fake->exec = exec;
       bufmgr_fake->exec_priv = priv;
}

static int
drm_intel_fake_bo_exec(drm_intel_bo *bo, int used,
                     drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
{
       drm_intel_bufmgr_fake *bufmgr_fake =
           (drm_intel_bufmgr_fake *) bo->bufmgr;
       drm_intel_bo_fake *batch_fake = (drm_intel_bo_fake *) bo;
       struct drm_i915_batchbuffer batch;
       int ret;
       int retry_count = 0;

       pthread_mutex_lock(&bufmgr_fake->lock);

       bufmgr_fake->performed_rendering = 0;

       drm_intel_fake_calculate_domains(bo);

       batch_fake->read_domains = I915_GEM_DOMAIN_COMMAND;

       /* If we've run out of RAM, blow the whole lot away and retry. */
restart:
       ret = drm_intel_fake_reloc_and_validate_buffer(bo);
       if (bufmgr_fake->fail == 1) {
              if (retry_count == 0) {
                     retry_count++;
                     drm_intel_fake_kick_all_locked(bufmgr_fake);
                     bufmgr_fake->fail = 0;
                     goto restart;
              } else        /* dump out the memory here */
                     mmDumpMemInfo(bufmgr_fake->heap);
       }

       assert(ret == 0);

       if (bufmgr_fake->exec != NULL) {
              int ret = bufmgr_fake->exec(bo, used, bufmgr_fake->exec_priv);
              if (ret != 0) {
                     pthread_mutex_unlock(&bufmgr_fake->lock);
                     return ret;
              }
       } else {
              batch.start = bo->offset;
              batch.used = used;
              batch.cliprects = cliprects;
              batch.num_cliprects = num_cliprects;
              batch.DR1 = 0;
              batch.DR4 = DR4;

              if (drmCommandWrite
                  (bufmgr_fake->fd, DRM_I915_BATCHBUFFER, &batch,
                   sizeof(batch))) {
                     drmMsg("DRM_I915_BATCHBUFFER: %d\n", -errno);
                     pthread_mutex_unlock(&bufmgr_fake->lock);
                     return -errno;
              }
       }

       drm_intel_fake_fence_validated(bo->bufmgr);

       drm_intel_bo_fake_post_submit(bo);

       pthread_mutex_unlock(&bufmgr_fake->lock);

       return 0;
}

/* Return an error if the list of BOs will exceed the aperture size.
 *
 * This is a rough guess: during the validate sequence we may place a
 * buffer in an inopportune spot early on and then fail to fit a set
 * smaller than the aperture.
 */
static int
drm_intel_fake_check_aperture_space(drm_intel_bo ** bo_array, int count)
{
       drm_intel_bufmgr_fake *bufmgr_fake =
           (drm_intel_bufmgr_fake *) bo_array[0]->bufmgr;
       unsigned int sz = 0;
       int i;

       for (i = 0; i < count; i++) {
              drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo_array[i];

              if (bo_fake == NULL)
                     continue;

              if (!bo_fake->is_static)
                     sz += ALIGN(bo_array[i]->size, bo_fake->alignment);
              sz += bo_fake->child_size;
       }

       if (sz > bufmgr_fake->size) {
              DBG("check_space: overflowed bufmgr size, %dkb vs %dkb\n",
                  sz / 1024, bufmgr_fake->size / 1024);
              return -1;
       }

       DBG("drm_check_space: sz %dkb vs bufmgr %dkb\n", sz / 1024,
           bufmgr_fake->size / 1024);
       return 0;
}

/* Evicts all buffers, waiting for fences to pass and copying contents out
 * as necessary.
 *
 * Used by the X Server on LeaveVT, when the card memory is no longer our
 * domain.
 */
void drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr)
{
       drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
       struct block *block, *tmp;

       pthread_mutex_lock(&bufmgr_fake->lock);

       bufmgr_fake->need_fence = 1;
       bufmgr_fake->fail = 0;

       /* Wait for hardware idle.  We don't know where acceleration has been
        * happening, so we'll need to wait anyway before letting anything get
        * put on the card again.
        */
       drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);

       /* Check that we hadn't released the lock without having fenced the last
        * set of buffers.
        */
       assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
       assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));

       DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
              drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;
              /* Releases the memory, and memcpys dirty contents out if
               * necessary.
               */
              free_block(bufmgr_fake, block, 0);
              bo_fake->block = NULL;
       }

       pthread_mutex_unlock(&bufmgr_fake->lock);
}

void drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
                                        volatile unsigned int
                                        *last_dispatch)
{
       drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

       bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;
}

drm_intel_bufmgr *drm_intel_bufmgr_fake_init(int fd,
                                        unsigned long low_offset,
                                        void *low_virtual,
                                        unsigned long size,
                                        volatile unsigned int
                                        *last_dispatch)
{
       drm_intel_bufmgr_fake *bufmgr_fake;

       bufmgr_fake = calloc(1, sizeof(*bufmgr_fake));
       if (bufmgr_fake == NULL)
              return NULL;

       if (pthread_mutex_init(&bufmgr_fake->lock, NULL) != 0) {
              free(bufmgr_fake);
              return NULL;
       }

       /* Initialize allocator */
       DRMINITLISTHEAD(&bufmgr_fake->fenced);
       DRMINITLISTHEAD(&bufmgr_fake->on_hardware);
       DRMINITLISTHEAD(&bufmgr_fake->lru);

       bufmgr_fake->low_offset = low_offset;
       bufmgr_fake->virtual = low_virtual;
       bufmgr_fake->size = size;
       bufmgr_fake->heap = mmInit(low_offset, size);

       /* Hook in methods */
       bufmgr_fake->bufmgr.bo_alloc = drm_intel_fake_bo_alloc;
       bufmgr_fake->bufmgr.bo_alloc_for_render = drm_intel_fake_bo_alloc;
       bufmgr_fake->bufmgr.bo_alloc_tiled = drm_intel_fake_bo_alloc_tiled;
       bufmgr_fake->bufmgr.bo_reference = drm_intel_fake_bo_reference;
       bufmgr_fake->bufmgr.bo_unreference = drm_intel_fake_bo_unreference;
       bufmgr_fake->bufmgr.bo_map = drm_intel_fake_bo_map;
       bufmgr_fake->bufmgr.bo_unmap = drm_intel_fake_bo_unmap;
       bufmgr_fake->bufmgr.bo_subdata = drm_intel_fake_bo_subdata;
       bufmgr_fake->bufmgr.bo_wait_rendering =
           drm_intel_fake_bo_wait_rendering;
       bufmgr_fake->bufmgr.bo_emit_reloc = drm_intel_fake_emit_reloc;
       bufmgr_fake->bufmgr.destroy = drm_intel_fake_destroy;
       bufmgr_fake->bufmgr.bo_exec = drm_intel_fake_bo_exec;
       bufmgr_fake->bufmgr.check_aperture_space =
           drm_intel_fake_check_aperture_space;
       bufmgr_fake->bufmgr.debug = 0;

       bufmgr_fake->fd = fd;
       bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;

       return &bufmgr_fake->bufmgr;
}
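
/* Initialization sketch (illustrative): a driver managing, say, a 32 MB
 * aperture range starting at aper_offset/aper_virtual would do roughly
 *
 *   drm_intel_bufmgr *bufmgr =
 *          drm_intel_bufmgr_fake_init(fd, aper_offset, aper_virtual,
 *                                  32 * 1024 * 1024, hw_breadcrumb);
 *   drm_intel_bufmgr_fake_set_fence_callback(bufmgr, my_emit, my_wait,
 *                                       driver_priv);
 *
 * aper_offset, aper_virtual, hw_breadcrumb, my_emit, my_wait and
 * driver_priv are hypothetical names for driver-supplied values.
 */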