libdrm 2.4.37

intel_bufmgr.h File Reference

Public definitions of Intel-specific bufmgr functions.

#include <stdio.h>
#include <stdint.h>

Classes

struct  _drm_intel_bo
struct  _drm_intel_aub_annotation

Defines

#define BO_ALLOC_FOR_RENDER   (1<<0)
#define dri_bo   drm_intel_bo
#define dri_bufmgr   drm_intel_bufmgr
#define dri_bo_alloc   drm_intel_bo_alloc
#define dri_bo_reference   drm_intel_bo_reference
#define dri_bo_unreference   drm_intel_bo_unreference
#define dri_bo_map   drm_intel_bo_map
#define dri_bo_unmap   drm_intel_bo_unmap
#define dri_bo_subdata   drm_intel_bo_subdata
#define dri_bo_get_subdata   drm_intel_bo_get_subdata
#define dri_bo_wait_rendering   drm_intel_bo_wait_rendering
#define dri_bufmgr_set_debug   drm_intel_bufmgr_set_debug
#define dri_bufmgr_destroy   drm_intel_bufmgr_destroy
#define dri_bo_exec   drm_intel_bo_exec
#define dri_bufmgr_check_aperture_space   drm_intel_bufmgr_check_aperture_space
#define dri_bo_emit_reloc(reloc_bo, read, write, target_offset, reloc_offset, target_bo)
#define dri_bo_pin   drm_intel_bo_pin
#define dri_bo_unpin   drm_intel_bo_unpin
#define dri_bo_get_tiling   drm_intel_bo_get_tiling
#define dri_bo_set_tiling(bo, mode)   drm_intel_bo_set_tiling(bo, mode, 0)
#define dri_bo_flink   drm_intel_bo_flink
#define intel_bufmgr_gem_init   drm_intel_bufmgr_gem_init
#define intel_bo_gem_create_from_name   drm_intel_bo_gem_create_from_name
#define intel_bufmgr_gem_enable_reuse   drm_intel_bufmgr_gem_enable_reuse
#define intel_bufmgr_fake_init   drm_intel_bufmgr_fake_init
#define intel_bufmgr_fake_set_last_dispatch   drm_intel_bufmgr_fake_set_last_dispatch
#define intel_bufmgr_fake_set_exec_callback   drm_intel_bufmgr_fake_set_exec_callback
#define intel_bufmgr_fake_set_fence_callback   drm_intel_bufmgr_fake_set_fence_callback
#define intel_bo_fake_alloc_static   drm_intel_bo_fake_alloc_static
#define intel_bo_fake_disable_backing_store   drm_intel_bo_fake_disable_backing_store
#define intel_bufmgr_fake_contended_lock_take   drm_intel_bufmgr_fake_contended_lock_take
#define intel_bufmgr_fake_evict_all   drm_intel_bufmgr_fake_evict_all

Typedefs

typedef struct _drm_intel_bufmgr drm_intel_bufmgr
typedef struct _drm_intel_context drm_intel_context
typedef struct _drm_intel_bo drm_intel_bo
typedef struct _drm_intel_aub_annotation drm_intel_aub_annotation

Enumerations

enum  aub_dump_bmp_format { AUB_DUMP_BMP_FORMAT_8BIT = 1, AUB_DUMP_BMP_FORMAT_ARGB_4444 = 4, AUB_DUMP_BMP_FORMAT_ARGB_0888 = 6, AUB_DUMP_BMP_FORMAT_ARGB_8888 = 7 }

Functions

drm_intel_bo * drm_intel_bo_alloc (drm_intel_bufmgr *bufmgr, const char *name, unsigned long size, unsigned int alignment)
drm_intel_bo * drm_intel_bo_alloc_for_render (drm_intel_bufmgr *bufmgr, const char *name, unsigned long size, unsigned int alignment)
drm_intel_bo * drm_intel_bo_alloc_tiled (drm_intel_bufmgr *bufmgr, const char *name, int x, int y, int cpp, uint32_t *tiling_mode, unsigned long *pitch, unsigned long flags)
void drm_intel_bo_reference (drm_intel_bo *bo)
void drm_intel_bo_unreference (drm_intel_bo *bo)
int drm_intel_bo_map (drm_intel_bo *bo, int write_enable)
int drm_intel_bo_unmap (drm_intel_bo *bo)
int drm_intel_bo_subdata (drm_intel_bo *bo, unsigned long offset, unsigned long size, const void *data)
int drm_intel_bo_get_subdata (drm_intel_bo *bo, unsigned long offset, unsigned long size, void *data)
void drm_intel_bo_wait_rendering (drm_intel_bo *bo)
void drm_intel_bufmgr_set_debug (drm_intel_bufmgr *bufmgr, int enable_debug)
void drm_intel_bufmgr_destroy (drm_intel_bufmgr *bufmgr)
int drm_intel_bo_exec (drm_intel_bo *bo, int used, struct drm_clip_rect *cliprects, int num_cliprects, int DR4)
int drm_intel_bo_mrb_exec (drm_intel_bo *bo, int used, struct drm_clip_rect *cliprects, int num_cliprects, int DR4, unsigned int flags)
int drm_intel_bufmgr_check_aperture_space (drm_intel_bo **bo_array, int count)
int drm_intel_bo_emit_reloc (drm_intel_bo *bo, uint32_t offset, drm_intel_bo *target_bo, uint32_t target_offset, uint32_t read_domains, uint32_t write_domain)
int drm_intel_bo_emit_reloc_fence (drm_intel_bo *bo, uint32_t offset, drm_intel_bo *target_bo, uint32_t target_offset, uint32_t read_domains, uint32_t write_domain)
int drm_intel_bo_pin (drm_intel_bo *bo, uint32_t alignment)
int drm_intel_bo_unpin (drm_intel_bo *bo)
int drm_intel_bo_set_tiling (drm_intel_bo *bo, uint32_t *tiling_mode, uint32_t stride)
int drm_intel_bo_get_tiling (drm_intel_bo *bo, uint32_t *tiling_mode, uint32_t *swizzle_mode)
int drm_intel_bo_flink (drm_intel_bo *bo, uint32_t *name)
int drm_intel_bo_busy (drm_intel_bo *bo)
int drm_intel_bo_madvise (drm_intel_bo *bo, int madv)
int drm_intel_bo_disable_reuse (drm_intel_bo *bo)
int drm_intel_bo_is_reusable (drm_intel_bo *bo)
int drm_intel_bo_references (drm_intel_bo *bo, drm_intel_bo *target_bo)
drm_intel_bufmgr * drm_intel_bufmgr_gem_init (int fd, int batch_size)
 Initializes the GEM buffer manager, which uses the kernel to allocate, map, and manage buffer objects.
drm_intel_bo * drm_intel_bo_gem_create_from_name (drm_intel_bufmgr *bufmgr, const char *name, unsigned int handle)
 Returns a drm_intel_bo wrapping the given buffer object handle.
void drm_intel_bufmgr_gem_enable_reuse (drm_intel_bufmgr *bufmgr)
 Enables unlimited caching of buffer objects for reuse.
void drm_intel_bufmgr_gem_enable_fenced_relocs (drm_intel_bufmgr *bufmgr)
 Enable use of fenced reloc type.
void drm_intel_bufmgr_gem_set_vma_cache_size (drm_intel_bufmgr *bufmgr, int limit)
int drm_intel_gem_bo_map_unsynchronized (drm_intel_bo *bo)
 Performs a mapping of the buffer object like the normal GTT mapping, but avoids waiting for the GPU to be done reading from or rendering to the buffer.
int drm_intel_gem_bo_map_gtt (drm_intel_bo *bo)
int drm_intel_gem_bo_unmap_gtt (drm_intel_bo *bo)
int drm_intel_gem_bo_get_reloc_count (drm_intel_bo *bo)
void drm_intel_gem_bo_clear_relocs (drm_intel_bo *bo, int start)
 Removes existing relocation entries in the BO after "start".
void drm_intel_gem_bo_start_gtt_access (drm_intel_bo *bo, int write_enable)
 Sets the object to the GTT read and possibly write domain, used by the X 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
void drm_intel_bufmgr_gem_set_aub_dump (drm_intel_bufmgr *bufmgr, int enable)
 Sets up AUB dumping.
void drm_intel_gem_bo_aub_dump_bmp (drm_intel_bo *bo, int x1, int y1, int width, int height, enum aub_dump_bmp_format format, int pitch, int offset)
void drm_intel_bufmgr_gem_set_aub_annotations (drm_intel_bo *bo, drm_intel_aub_annotation *annotations, unsigned count)
 Annotate the given bo for use in aub dumping.
int drm_intel_get_pipe_from_crtc_id (drm_intel_bufmgr *bufmgr, int crtc_id)
int drm_intel_get_aperture_sizes (int fd, size_t *mappable, size_t *total)
int drm_intel_bufmgr_gem_get_devid (drm_intel_bufmgr *bufmgr)
int drm_intel_gem_bo_wait (drm_intel_bo *bo, int64_t timeout_ns)
 Waits on a BO for the given amount of time.
drm_intel_context * drm_intel_gem_context_create (drm_intel_bufmgr *bufmgr)
void drm_intel_gem_context_destroy (drm_intel_context *ctx)
int drm_intel_gem_bo_context_exec (drm_intel_bo *bo, drm_intel_context *ctx, int used, unsigned int flags)
drm_intel_bufmgr * drm_intel_bufmgr_fake_init (int fd, unsigned long low_offset, void *low_virtual, unsigned long size, volatile unsigned int *last_dispatch)
void drm_intel_bufmgr_fake_set_last_dispatch (drm_intel_bufmgr *bufmgr, volatile unsigned int *last_dispatch)
void drm_intel_bufmgr_fake_set_exec_callback (drm_intel_bufmgr *bufmgr, int(*exec)(drm_intel_bo *bo, unsigned int used, void *priv), void *priv)
void drm_intel_bufmgr_fake_set_fence_callback (drm_intel_bufmgr *bufmgr, unsigned int(*emit)(void *priv), void(*wait)(unsigned int fence, void *priv), void *priv)
drm_intel_bo * drm_intel_bo_fake_alloc_static (drm_intel_bufmgr *bufmgr, const char *name, unsigned long offset, unsigned long size, void *virt)
void drm_intel_bo_fake_disable_backing_store (drm_intel_bo *bo, void(*invalidate_cb)(drm_intel_bo *bo, void *ptr), void *ptr)
 Set the buffer as not requiring backing store, and instead get the callback invoked whenever it would be set dirty.
void drm_intel_bufmgr_fake_contended_lock_take (drm_intel_bufmgr *bufmgr)
void drm_intel_bufmgr_fake_evict_all (drm_intel_bufmgr *bufmgr)
 Evicts all buffers, waiting for fences to pass and copying contents out as necessary.
struct drm_intel_decode * drm_intel_decode_context_alloc (uint32_t devid)
void drm_intel_decode_context_free (struct drm_intel_decode *ctx)
void drm_intel_decode_set_batch_pointer (struct drm_intel_decode *ctx, void *data, uint32_t hw_offset, int count)
void drm_intel_decode_set_dump_past_end (struct drm_intel_decode *ctx, int dump_past_end)
void drm_intel_decode_set_head_tail (struct drm_intel_decode *ctx, uint32_t head, uint32_t tail)
void drm_intel_decode_set_output_file (struct drm_intel_decode *ctx, FILE *out)
void drm_intel_decode (struct drm_intel_decode *ctx)
 Decodes an i830-i915 batch buffer, writing the output to stdout.

Detailed Description

Public definitions of Intel-specific bufmgr functions.

Definition in file intel_bufmgr.h.
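
For orientation, here is a minimal usage sketch of the GEM path. Error handling is omitted, "fd" is assumed to be an already-opened DRM device file descriptor, and the data written is a placeholder.

       drm_intel_bufmgr *bufmgr;
       drm_intel_bo *bo;
       uint32_t data[16] = { 0 };

       bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);   /* batch size in bytes */
       drm_intel_bufmgr_gem_enable_reuse(bufmgr);      /* optional BO cache */

       bo = drm_intel_bo_alloc(bufmgr, "scratch", 4096, 4096);
       drm_intel_bo_map(bo, 1);                        /* write_enable = 1 */
       memcpy(bo->virtual, data, sizeof(data));
       drm_intel_bo_unmap(bo);

       drm_intel_bo_unreference(bo);
       drm_intel_bufmgr_destroy(bufmgr);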


Class Documentation

struct _drm_intel_bo

Definition at line 47 of file intel_bufmgr.h.

Class Members
unsigned long align: Alignment requirement for the object, used for GTT mapping and pinning.
drm_intel_bufmgr *bufmgr: Buffer manager context associated with this buffer object.
int handle: MM-specific handle for accessing the object.
unsigned long offset: Last-seen card virtual address (offset from the beginning of the aperture) for the object. This should be used to fill relocation entries when calling drm_intel_bo_emit_reloc().
unsigned long size: Size in bytes of the buffer object. The size may be larger than the size originally requested for the allocation, e.g. due to alignment to page size.
void *virtual: Virtual address for accessing the buffer data. Only valid while mapped.
struct _drm_intel_aub_annotation

Definition at line 96 of file intel_bufmgr.h.

Class Members
uint32_t ending_offset
uint32_t subtype
uint32_t type

Define Documentation

#define BO_ALLOC_FOR_RENDER   (1<<0)

Definition at line 102 of file intel_bufmgr.h.

#define dri_bo   drm_intel_bo

Compatibility defines to keep old code building despite the symbol rename from dri_* to drm_intel_*

Definition at line 244 of file intel_bufmgr.h.

#define dri_bo_alloc   drm_intel_bo_alloc

Definition at line 246 of file intel_bufmgr.h.

#define dri_bo_emit_reloc(reloc_bo, read, write, target_offset, reloc_offset, target_bo)
Value:
drm_intel_bo_emit_reloc(reloc_bo, reloc_offset,                \
                            target_bo, target_offset,          \
                            read, write);

Definition at line 258 of file intel_bufmgr.h.

#define dri_bo_exec   drm_intel_bo_exec

Definition at line 256 of file intel_bufmgr.h.

#define dri_bo_flink   drm_intel_bo_flink

Definition at line 267 of file intel_bufmgr.h.

#define dri_bo_get_subdata   drm_intel_bo_get_subdata

Definition at line 252 of file intel_bufmgr.h.

#define dri_bo_get_tiling   drm_intel_bo_get_tiling

Definition at line 265 of file intel_bufmgr.h.

#define dri_bo_map   drm_intel_bo_map

Definition at line 249 of file intel_bufmgr.h.

#define dri_bo_pin   drm_intel_bo_pin

Definition at line 263 of file intel_bufmgr.h.

#define dri_bo_reference   drm_intel_bo_reference

Definition at line 247 of file intel_bufmgr.h.

#define dri_bo_set_tiling(bo, mode)   drm_intel_bo_set_tiling(bo, mode, 0)

Definition at line 266 of file intel_bufmgr.h.

#define dri_bo_subdata   drm_intel_bo_subdata

Definition at line 251 of file intel_bufmgr.h.

#define dri_bo_unmap   drm_intel_bo_unmap

Definition at line 250 of file intel_bufmgr.h.

#define dri_bo_unpin   drm_intel_bo_unpin

Definition at line 264 of file intel_bufmgr.h.

#define dri_bo_unreference   drm_intel_bo_unreference

Definition at line 248 of file intel_bufmgr.h.

#define dri_bo_wait_rendering   drm_intel_bo_wait_rendering

Definition at line 253 of file intel_bufmgr.h.

#define dri_bufmgr   drm_intel_bufmgr

Definition at line 245 of file intel_bufmgr.h.

#define dri_bufmgr_check_aperture_space   drm_intel_bufmgr_check_aperture_space

Definition at line 257 of file intel_bufmgr.h.

#define dri_bufmgr_destroy   drm_intel_bufmgr_destroy

Definition at line 255 of file intel_bufmgr.h.

#define dri_bufmgr_set_debug   drm_intel_bufmgr_set_debug

Definition at line 254 of file intel_bufmgr.h.

#define intel_bo_fake_alloc_static   drm_intel_bo_fake_alloc_static

Definition at line 275 of file intel_bufmgr.h.

#define intel_bo_fake_disable_backing_store   drm_intel_bo_fake_disable_backing_store

Definition at line 276 of file intel_bufmgr.h.

#define intel_bo_gem_create_from_name   drm_intel_bo_gem_create_from_name

Definition at line 269 of file intel_bufmgr.h.

#define intel_bufmgr_fake_contended_lock_take   drm_intel_bufmgr_fake_contended_lock_take

Definition at line 277 of file intel_bufmgr.h.

#define intel_bufmgr_fake_evict_all   drm_intel_bufmgr_fake_evict_all

Definition at line 278 of file intel_bufmgr.h.

#define intel_bufmgr_fake_init   drm_intel_bufmgr_fake_init

Definition at line 271 of file intel_bufmgr.h.

#define intel_bufmgr_fake_set_exec_callback   drm_intel_bufmgr_fake_set_exec_callback

Definition at line 273 of file intel_bufmgr.h.

#define intel_bufmgr_fake_set_fence_callback   drm_intel_bufmgr_fake_set_fence_callback

Definition at line 274 of file intel_bufmgr.h.

#define intel_bufmgr_fake_set_last_dispatch   drm_intel_bufmgr_fake_set_last_dispatch

Definition at line 272 of file intel_bufmgr.h.

#define intel_bufmgr_gem_enable_reuse   drm_intel_bufmgr_gem_enable_reuse

Definition at line 270 of file intel_bufmgr.h.

#define intel_bufmgr_gem_init   drm_intel_bufmgr_gem_init

Definition at line 268 of file intel_bufmgr.h.


Typedef Documentation

typedef struct _drm_intel_bo drm_intel_bo

Definition at line 45 of file intel_bufmgr.h.

typedef struct _drm_intel_bufmgr drm_intel_bufmgr

Definition at line 43 of file intel_bufmgr.h.

typedef struct _drm_intel_context drm_intel_context

Definition at line 44 of file intel_bufmgr.h.


Enumeration Type Documentation

enum aub_dump_bmp_format

Enumerator:
AUB_DUMP_BMP_FORMAT_8BIT 
AUB_DUMP_BMP_FORMAT_ARGB_4444 
AUB_DUMP_BMP_FORMAT_ARGB_0888 
AUB_DUMP_BMP_FORMAT_ARGB_8888 

Definition at line 89 of file intel_bufmgr.h.


Function Documentation

drm_intel_bo* drm_intel_bo_alloc ( drm_intel_bufmgr *  bufmgr,
const char *  name,
unsigned long  size,
unsigned int  alignment 
)

Definition at line 49 of file intel_bufmgr.c.

{
       return bufmgr->bo_alloc(bufmgr, name, size, alignment);
}
drm_intel_bo* drm_intel_bo_alloc_for_render ( drm_intel_bufmgr *  bufmgr,
const char *  name,
unsigned long  size,
unsigned int  alignment 
)

Definition at line 55 of file intel_bufmgr.c.

{
       return bufmgr->bo_alloc_for_render(bufmgr, name, size, alignment);
}
drm_intel_bo* drm_intel_bo_alloc_tiled ( drm_intel_bufmgr *  bufmgr,
const char *  name,
int  x,
int  y,
int  cpp,
uint32_t *  tiling_mode,
unsigned long *  pitch,
unsigned long  flags 
)

Definition at line 64 of file intel_bufmgr.c.

{
       return bufmgr->bo_alloc_tiled(bufmgr, name, x, y, cpp,
                                  tiling_mode, pitch, flags);
}
int drm_intel_bo_busy ( drm_intel_bo *  bo)

Definition at line 249 of file intel_bufmgr.c.

{
       if (bo->bufmgr->bo_busy)
              return bo->bufmgr->bo_busy(bo);
       return 0;
}
int drm_intel_bo_disable_reuse ( drm_intel_bo *  bo)

Definition at line 235 of file intel_bufmgr.c.

{
       if (bo->bufmgr->bo_disable_reuse)
              return bo->bufmgr->bo_disable_reuse(bo);
       return 0;
}
int drm_intel_bo_emit_reloc ( drm_intel_bo *  bo,
uint32_t  offset,
drm_intel_bo *  target_bo,
uint32_t  target_offset,
uint32_t  read_domains,
uint32_t  write_domain 
)

Definition at line 177 of file intel_bufmgr.c.

{
       return bo->bufmgr->bo_emit_reloc(bo, offset,
                                    target_bo, target_offset,
                                    read_domains, write_domain);
}
int drm_intel_bo_emit_reloc_fence ( drm_intel_bo *  bo,
uint32_t  offset,
drm_intel_bo *  target_bo,
uint32_t  target_offset,
uint32_t  read_domains,
uint32_t  write_domain 
)

Definition at line 188 of file intel_bufmgr.c.

{
       return bo->bufmgr->bo_emit_reloc_fence(bo, offset,
                                          target_bo, target_offset,
                                          read_domains, write_domain);
}
int drm_intel_bo_exec ( drm_intel_bo *  bo,
int  used,
struct drm_clip_rect *  cliprects,
int  num_cliprects,
int  DR4 
)
drm_intel_bo* drm_intel_bo_fake_alloc_static ( drm_intel_bufmgr *  bufmgr,
const char *  name,
unsigned long  offset,
unsigned long  size,
void *  virt 
)

Definition at line 871 of file intel_bufmgr_fake.c.

{
       drm_intel_bufmgr_fake *bufmgr_fake;
       drm_intel_bo_fake *bo_fake;

       bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

       assert(size != 0);

       bo_fake = calloc(1, sizeof(*bo_fake));
       if (!bo_fake)
              return NULL;

       bo_fake->bo.size = size;
       bo_fake->bo.offset = offset;
       bo_fake->bo.virtual = virt;
       bo_fake->bo.bufmgr = bufmgr;
       bo_fake->refcount = 1;
       bo_fake->id = ++bufmgr_fake->buf_nr;
       bo_fake->name = name;
       bo_fake->flags = BM_PINNED;
       bo_fake->is_static = 1;

       DBG("drm_bo_alloc_static: (buf %d: %s, %d kb)\n", bo_fake->id,
           bo_fake->name, bo_fake->bo.size / 1024);

       return &bo_fake->bo;
}
void drm_intel_bo_fake_disable_backing_store ( drm_intel_bo *  bo,
void(*)(drm_intel_bo *bo, void *ptr)  invalidate_cb,
void *  ptr 
)

Set the buffer as not requiring backing store, and instead get the callback invoked whenever it would be set dirty.

Definition at line 966 of file intel_bufmgr_fake.c.

{
       drm_intel_bufmgr_fake *bufmgr_fake =
           (drm_intel_bufmgr_fake *) bo->bufmgr;
       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

       pthread_mutex_lock(&bufmgr_fake->lock);

       if (bo_fake->backing_store)
              free_backing_store(bo);

       bo_fake->flags |= BM_NO_BACKING_STORE;

       DBG("disable_backing_store set buf %d dirty\n", bo_fake->id);
       bo_fake->dirty = 1;
       bo_fake->invalidate_cb = invalidate_cb;
       bo_fake->invalidate_ptr = ptr;

       /* Note that it is invalid right from the start.  Also note
        * invalidate_cb is called with the bufmgr locked, so cannot
        * itself make bufmgr calls.
        */
       if (invalidate_cb != NULL)
              invalidate_cb(bo, ptr);

       pthread_mutex_unlock(&bufmgr_fake->lock);
}

int drm_intel_bo_flink ( drm_intel_bo *  bo,
uint32_t *  name 
)

Definition at line 168 of file intel_bufmgr.c.

{
       if (bo->bufmgr->bo_flink)
              return bo->bufmgr->bo_flink(bo, name);

       return -ENODEV;
}
drm_intel_bo* drm_intel_bo_gem_create_from_name ( drm_intel_bufmgr *  bufmgr,
const char *  name,
unsigned int  handle 
)

Returns a drm_intel_bo wrapping the given buffer object handle.

This can be used when one application needs to pass a buffer object to another.
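
A minimal sharing sketch under that model follows; the IPC transport and error handling are omitted, and "bo" and "bufmgr" are assumed to already exist in the respective processes.

       /* Process A: export a global (flink) name for the BO. */
       uint32_t flink_name;
       drm_intel_bo_flink(bo, &flink_name);
       /* ... hand flink_name to process B over IPC (not shown) ... */

       /* Process B: wrap the received name in a local drm_intel_bo. */
       drm_intel_bo *shared =
              drm_intel_bo_gem_create_from_name(bufmgr, "shared", flink_name);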

Definition at line 833 of file intel_bufmgr_gem.c.

{
       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
       drm_intel_bo_gem *bo_gem;
       int ret;
       struct drm_gem_open open_arg;
       struct drm_i915_gem_get_tiling get_tiling;
       drmMMListHead *list;

       /* At the moment most applications only have a few named bo.
        * For instance, in a DRI client only the render buffers passed
        * between X and the client are named. And since X returns the
        * alternating names for the front/back buffer a linear search
        * provides a sufficiently fast match.
        */
       for (list = bufmgr_gem->named.next;
            list != &bufmgr_gem->named;
            list = list->next) {
              bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
              if (bo_gem->global_name == handle) {
                     drm_intel_gem_bo_reference(&bo_gem->bo);
                     return &bo_gem->bo;
              }
       }

       bo_gem = calloc(1, sizeof(*bo_gem));
       if (!bo_gem)
              return NULL;

       VG_CLEAR(open_arg);
       open_arg.name = handle;
       ret = drmIoctl(bufmgr_gem->fd,
                     DRM_IOCTL_GEM_OPEN,
                     &open_arg);
       if (ret != 0) {
              DBG("Couldn't reference %s handle 0x%08x: %s\n",
                  name, handle, strerror(errno));
              free(bo_gem);
              return NULL;
       }
       bo_gem->bo.size = open_arg.size;
       bo_gem->bo.offset = 0;
       bo_gem->bo.virtual = NULL;
       bo_gem->bo.bufmgr = bufmgr;
       bo_gem->name = name;
       atomic_set(&bo_gem->refcount, 1);
       bo_gem->validate_index = -1;
       bo_gem->gem_handle = open_arg.handle;
       bo_gem->bo.handle = open_arg.handle;
       bo_gem->global_name = handle;
       bo_gem->reusable = false;

       VG_CLEAR(get_tiling);
       get_tiling.handle = bo_gem->gem_handle;
       ret = drmIoctl(bufmgr_gem->fd,
                     DRM_IOCTL_I915_GEM_GET_TILING,
                     &get_tiling);
       if (ret != 0) {
              drm_intel_gem_bo_unreference(&bo_gem->bo);
              return NULL;
       }
       bo_gem->tiling_mode = get_tiling.tiling_mode;
       bo_gem->swizzle_mode = get_tiling.swizzle_mode;
       /* XXX stride is unknown */
       drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

       DRMINITLISTHEAD(&bo_gem->vma_list);
       DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
       DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);

       return &bo_gem->bo;
}

int drm_intel_bo_get_subdata ( drm_intel_bo *  bo,
unsigned long  offset,
unsigned long  size,
void *  data 
)

Definition at line 103 of file intel_bufmgr.c.

{
       int ret;
       if (bo->bufmgr->bo_get_subdata)
              return bo->bufmgr->bo_get_subdata(bo, offset, size, data);

       if (size == 0 || data == NULL)
              return 0;

       ret = drm_intel_bo_map(bo, 0);
       if (ret)
              return ret;
       memcpy(data, (unsigned char *)bo->virtual + offset, size);
       drm_intel_bo_unmap(bo);
       return 0;
}

int drm_intel_bo_get_tiling ( drm_intel_bo *  bo,
uint32_t *  tiling_mode,
uint32_t *  swizzle_mode 
)

Definition at line 224 of file intel_bufmgr.c.

{
       if (bo->bufmgr->bo_get_tiling)
              return bo->bufmgr->bo_get_tiling(bo, tiling_mode, swizzle_mode);

       *tiling_mode = I915_TILING_NONE;
       *swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
       return 0;
}
int drm_intel_bo_is_reusable ( drm_intel_bo *  bo)

Definition at line 242 of file intel_bufmgr.c.

{
       if (bo->bufmgr->bo_is_reusable)
              return bo->bufmgr->bo_is_reusable(bo);
       return 0;
}
int drm_intel_bo_madvise ( drm_intel_bo *  bo,
int  madv 
)

Definition at line 256 of file intel_bufmgr.c.

{
       if (bo->bufmgr->bo_madvise)
              return bo->bufmgr->bo_madvise(bo, madv);
       return -1;
}
int drm_intel_bo_map ( drm_intel_bo *  bo,
int  write_enable 
)

Definition at line 85 of file intel_bufmgr.c.

{
       return bo->bufmgr->bo_map(bo, write_enable);
}

int drm_intel_bo_mrb_exec ( drm_intel_bo *  bo,
int  used,
struct drm_clip_rect *  cliprects,
int  num_cliprects,
int  DR4,
unsigned int  flags 
)
int drm_intel_bo_pin ( drm_intel_bo *  bo,
uint32_t  alignment 
)

Definition at line 198 of file intel_bufmgr.c.

{
       if (bo->bufmgr->bo_pin)
              return bo->bufmgr->bo_pin(bo, alignment);

       return -ENODEV;
}
void drm_intel_bo_reference ( drm_intel_bo *  bo)

Definition at line 72 of file intel_bufmgr.c.

{
       bo->bufmgr->bo_reference(bo);
}
int drm_intel_bo_references ( drm_intel_bo *  bo,
drm_intel_bo *  target_bo 
)

Definition at line 263 of file intel_bufmgr.c.

{
       return bo->bufmgr->bo_references(bo, target_bo);
}
int drm_intel_bo_set_tiling ( drm_intel_bo *  bo,
uint32_t *  tiling_mode,
uint32_t  stride 
)

Definition at line 214 of file intel_bufmgr.c.

{
       if (bo->bufmgr->bo_set_tiling)
              return bo->bufmgr->bo_set_tiling(bo, tiling_mode, stride);

       *tiling_mode = I915_TILING_NONE;
       return 0;
}
int drm_intel_bo_subdata ( drm_intel_bo *  bo,
unsigned long  offset,
unsigned long  size,
const void *  data 
)

Definition at line 96 of file intel_bufmgr.c.

{
       return bo->bufmgr->bo_subdata(bo, offset, size, data);
}
int drm_intel_bo_unmap ( drm_intel_bo *  bo)

Definition at line 90 of file intel_bufmgr.c.

{
       return bo->bufmgr->bo_unmap(bo);
}

int drm_intel_bo_unpin ( drm_intel_bo *  bo)

Definition at line 206 of file intel_bufmgr.c.

{
       if (bo->bufmgr->bo_unpin)
              return bo->bufmgr->bo_unpin(bo);

       return -ENODEV;
}
void drm_intel_bo_unreference ( drm_intel_bo *  bo)

Definition at line 77 of file intel_bufmgr.c.

{
       if (bo == NULL)
              return;

       bo->bufmgr->bo_unreference(bo);
}
void drm_intel_bo_wait_rendering ( drm_intel_bo *  bo)

Definition at line 121 of file intel_bufmgr.c.

{
       bo->bufmgr->bo_wait_rendering(bo);
}
int drm_intel_bufmgr_check_aperture_space ( drm_intel_bo **  bo_array,
int  count 
)

Definition at line 163 of file intel_bufmgr.c.

{
       return bo_array[0]->bufmgr->check_aperture_space(bo_array, count);
}
void drm_intel_bufmgr_destroy ( drm_intel_bufmgr *  bufmgr)

Definition at line 126 of file intel_bufmgr.c.

{
       bufmgr->destroy(bufmgr);
}
void drm_intel_bufmgr_fake_contended_lock_take ( drm_intel_bufmgr *  bufmgr)

Definition at line 775 of file intel_bufmgr_fake.c.

{
       drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
       struct block *block, *tmp;

       pthread_mutex_lock(&bufmgr_fake->lock);

       bufmgr_fake->need_fence = 1;
       bufmgr_fake->fail = 0;

       /* Wait for hardware idle.  We don't know where acceleration has been
        * happening, so we'll need to wait anyway before letting anything get
        * put on the card again.
        */
       drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);

       /* Check that we hadn't released the lock without having fenced the last
        * set of buffers.
        */
       assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
       assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));

       DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
              assert(_fence_test(bufmgr_fake, block->fence));
              set_dirty(block->bo);
       }

       pthread_mutex_unlock(&bufmgr_fake->lock);
}

void drm_intel_bufmgr_fake_evict_all ( drm_intel_bufmgr *  bufmgr)

Evicts all buffers, waiting for fences to pass and copying contents out as necessary.

Used by the X Server on LeaveVT, when the card memory is no longer our own.

Definition at line 1542 of file intel_bufmgr_fake.c.

{
       drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
       struct block *block, *tmp;

       pthread_mutex_lock(&bufmgr_fake->lock);

       bufmgr_fake->need_fence = 1;
       bufmgr_fake->fail = 0;

       /* Wait for hardware idle.  We don't know where acceleration has been
        * happening, so we'll need to wait anyway before letting anything get
        * put on the card again.
        */
       drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);

       /* Check that we hadn't released the lock without having fenced the last
        * set of buffers.
        */
       assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
       assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));

       DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
              drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;
              /* Releases the memory, and memcpys dirty contents out if
               * necessary.
               */
              free_block(bufmgr_fake, block, 0);
              bo_fake->block = NULL;
       }

       pthread_mutex_unlock(&bufmgr_fake->lock);
}

drm_intel_bufmgr* drm_intel_bufmgr_fake_init ( int  fd,
unsigned long  low_offset,
void *  low_virtual,
unsigned long  size,
volatile unsigned int *  last_dispatch 
)

Definition at line 1585 of file intel_bufmgr_fake.c.

{
       drm_intel_bufmgr_fake *bufmgr_fake;

       bufmgr_fake = calloc(1, sizeof(*bufmgr_fake));
       if (bufmgr_fake == NULL)
              return NULL;

       if (pthread_mutex_init(&bufmgr_fake->lock, NULL) != 0) {
              free(bufmgr_fake);
              return NULL;
       }

       /* Initialize allocator */
       DRMINITLISTHEAD(&bufmgr_fake->fenced);
       DRMINITLISTHEAD(&bufmgr_fake->on_hardware);
       DRMINITLISTHEAD(&bufmgr_fake->lru);

       bufmgr_fake->low_offset = low_offset;
       bufmgr_fake->virtual = low_virtual;
       bufmgr_fake->size = size;
       bufmgr_fake->heap = mmInit(low_offset, size);

       /* Hook in methods */
       bufmgr_fake->bufmgr.bo_alloc = drm_intel_fake_bo_alloc;
       bufmgr_fake->bufmgr.bo_alloc_for_render = drm_intel_fake_bo_alloc;
       bufmgr_fake->bufmgr.bo_alloc_tiled = drm_intel_fake_bo_alloc_tiled;
       bufmgr_fake->bufmgr.bo_reference = drm_intel_fake_bo_reference;
       bufmgr_fake->bufmgr.bo_unreference = drm_intel_fake_bo_unreference;
       bufmgr_fake->bufmgr.bo_map = drm_intel_fake_bo_map;
       bufmgr_fake->bufmgr.bo_unmap = drm_intel_fake_bo_unmap;
       bufmgr_fake->bufmgr.bo_subdata = drm_intel_fake_bo_subdata;
       bufmgr_fake->bufmgr.bo_wait_rendering =
           drm_intel_fake_bo_wait_rendering;
       bufmgr_fake->bufmgr.bo_emit_reloc = drm_intel_fake_emit_reloc;
       bufmgr_fake->bufmgr.destroy = drm_intel_fake_destroy;
       bufmgr_fake->bufmgr.bo_exec = drm_intel_fake_bo_exec;
       bufmgr_fake->bufmgr.check_aperture_space =
           drm_intel_fake_check_aperture_space;
       bufmgr_fake->bufmgr.debug = 0;

       bufmgr_fake->fd = fd;
       bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;

       return &bufmgr_fake->bufmgr;
}

void drm_intel_bufmgr_fake_set_exec_callback ( drm_intel_bufmgr *  bufmgr,
int(*)(drm_intel_bo *bo, unsigned int used, void *priv)  exec,
void *  priv 
)

Definition at line 1420 of file intel_bufmgr_fake.c.

{
       drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

       bufmgr_fake->exec = exec;
       bufmgr_fake->exec_priv = priv;
}
void drm_intel_bufmgr_fake_set_fence_callback ( drm_intel_bufmgr *  bufmgr,
unsigned int(*)(void *priv)  emit,
void(*)(unsigned int fence, void *priv)  wait,
void *  priv 
)

Definition at line 252 of file intel_bufmgr_fake.c.

{
       drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

       bufmgr_fake->fence_emit = emit;
       bufmgr_fake->fence_wait = wait;
       bufmgr_fake->fence_priv = priv;
}
void drm_intel_bufmgr_fake_set_last_dispatch ( drm_intel_bufmgr *  bufmgr,
volatile unsigned int *  last_dispatch 
)

Definition at line 1576 of file intel_bufmgr_fake.c.

{
       drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

       bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;
}
void drm_intel_bufmgr_gem_enable_fenced_relocs ( drm_intel_bufmgr *  bufmgr)

Enable use of fenced reloc type.

New code should enable this to avoid unnecessary fence register allocation. If this option is not enabled, all relocs will have a fence register allocated.
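
In practice this is a one-line opt-in right after bufmgr creation; a sketch, assuming "fd" is an open DRM device:

       drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
       drm_intel_bufmgr_gem_enable_fenced_relocs(bufmgr);

As the definition below shows, the flag only takes effect when the execbuf2 path is in use.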

Definition at line 2466 of file intel_bufmgr_gem.c.

{
       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

       if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
              bufmgr_gem->fenced_relocs = true;
}

void drm_intel_bufmgr_gem_enable_reuse ( drm_intel_bufmgr *  bufmgr)

Enables unlimited caching of buffer objects for reuse.

This is potentially very memory expensive, as the cache at each bucket size is only bounded by how many buffers of that size we've managed to have in flight at once.
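
A sketch of enabling reuse while separately bounding the related VMA (mapping) cache; the limit of 256 is illustrative:

       drm_intel_bufmgr_gem_enable_reuse(bufmgr);
       /* Cap how many CPU/GTT mappings are kept cached for reuse. */
       drm_intel_bufmgr_gem_set_vma_cache_size(bufmgr, 256);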

Definition at line 2451 of file intel_bufmgr_gem.c.

{
       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;

       bufmgr_gem->bo_reuse = true;
}
int drm_intel_bufmgr_gem_get_devid ( drm_intel_bufmgr *  bufmgr)

Definition at line 2780 of file intel_bufmgr_gem.c.

{
       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

       return bufmgr_gem->pci_device;
}
drm_intel_bufmgr* drm_intel_bufmgr_gem_init ( int  fd,
int  batch_size 
)

Initializes the GEM buffer manager, which uses the kernel to allocate, map, and manage buffer objects.

Parameters:
fd: File descriptor of the opened DRM device.

Definition at line 2936 of file intel_bufmgr_gem.c.

{
       drm_intel_bufmgr_gem *bufmgr_gem;
       struct drm_i915_gem_get_aperture aperture;
       drm_i915_getparam_t gp;
       int ret, tmp;
       bool exec2 = false;

       bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
       if (bufmgr_gem == NULL)
              return NULL;

       bufmgr_gem->fd = fd;

       if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
              free(bufmgr_gem);
              return NULL;
       }

       ret = drmIoctl(bufmgr_gem->fd,
                     DRM_IOCTL_I915_GEM_GET_APERTURE,
                     &aperture);

       if (ret == 0)
              bufmgr_gem->gtt_size = aperture.aper_available_size;
       else {
              fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
                     strerror(errno));
              bufmgr_gem->gtt_size = 128 * 1024 * 1024;
              fprintf(stderr, "Assuming %dkB available aperture size.\n"
                     "May lead to reduced performance or incorrect "
                     "rendering.\n",
                     (int)bufmgr_gem->gtt_size / 1024);
       }

       bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);

       if (IS_GEN2(bufmgr_gem->pci_device))
              bufmgr_gem->gen = 2;
       else if (IS_GEN3(bufmgr_gem->pci_device))
              bufmgr_gem->gen = 3;
       else if (IS_GEN4(bufmgr_gem->pci_device))
              bufmgr_gem->gen = 4;
       else if (IS_GEN5(bufmgr_gem->pci_device))
              bufmgr_gem->gen = 5;
       else if (IS_GEN6(bufmgr_gem->pci_device))
              bufmgr_gem->gen = 6;
       else if (IS_GEN7(bufmgr_gem->pci_device))
              bufmgr_gem->gen = 7;
       else
              assert(0);

       if (IS_GEN3(bufmgr_gem->pci_device) &&
           bufmgr_gem->gtt_size > 256*1024*1024) {
              /* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
               * be used for tiled blits. To simplify the accounting, just
               * subtract the unmappable part (fixed to 256MB on all known
               * gen3 devices) if the kernel advertises it. */
              bufmgr_gem->gtt_size -= 256*1024*1024;
       }

       VG_CLEAR(gp);
       gp.value = &tmp;

       gp.param = I915_PARAM_HAS_EXECBUF2;
       ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
       if (!ret)
              exec2 = true;

       gp.param = I915_PARAM_HAS_BSD;
       ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
       bufmgr_gem->has_bsd = ret == 0;

       gp.param = I915_PARAM_HAS_BLT;
       ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
       bufmgr_gem->has_blt = ret == 0;

       gp.param = I915_PARAM_HAS_RELAXED_FENCING;
       ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
       bufmgr_gem->has_relaxed_fencing = ret == 0;

       gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
       ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
       bufmgr_gem->has_wait_timeout = ret == 0;

       gp.param = I915_PARAM_HAS_LLC;
       ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
       if (ret != 0) {
              /* Kernel does not support the HAS_LLC query; fall back to
               * GPU generation detection and assume that we have LLC on
               * GEN6/7.
               */
              bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) |
                            IS_GEN7(bufmgr_gem->pci_device));
       } else
              bufmgr_gem->has_llc = *gp.value;

       if (bufmgr_gem->gen < 4) {
              gp.param = I915_PARAM_NUM_FENCES_AVAIL;
              gp.value = &bufmgr_gem->available_fences;
              ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
              if (ret) {
                     fprintf(stderr, "get fences failed: %d [%d]\n", ret,
                            errno);
                     fprintf(stderr, "param: %d, val: %d\n", gp.param,
                            *gp.value);
                     bufmgr_gem->available_fences = 0;
              } else {
                     /* XXX The kernel reports the total number of fences,
                      * including any that may be pinned.
                      *
                      * We presume that there will be at least one pinned
                      * fence for the scanout buffer, but there may be more
                      * than one scanout and the user may be manually
                      * pinning buffers. Let's move to execbuffer2 and
                      * thereby forget the insanity of using fences...
                      */
                     bufmgr_gem->available_fences -= 2;
                     if (bufmgr_gem->available_fences < 0)
                            bufmgr_gem->available_fences = 0;
              }
       }

       /* Let's go with one relocation per every 2 dwords (but round down a bit
        * since a power of two will mean an extra page allocation for the reloc
        * buffer).
        *
        * Every 4 was too few for the blender benchmark.
        */
       bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;

       bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
       bufmgr_gem->bufmgr.bo_alloc_for_render =
           drm_intel_gem_bo_alloc_for_render;
       bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
       bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
       bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
       bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
       bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
       bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
       bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
       bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
       bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
       bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
       bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
       bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
       bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
       bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
       bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
       /* Use the new one if available */
       if (exec2) {
              bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
              bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
       } else
              bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
       bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
       bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
       bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
       bufmgr_gem->bufmgr.debug = 0;
       bufmgr_gem->bufmgr.check_aperture_space =
           drm_intel_gem_check_aperture_space;
       bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
       bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
       bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
           drm_intel_gem_get_pipe_from_crtc_id;
       bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;

       DRMINITLISTHEAD(&bufmgr_gem->named);
       init_cache_buckets(bufmgr_gem);

       DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
       bufmgr_gem->vma_max = -1; /* unlimited by default */

       return &bufmgr_gem->bufmgr;
}

void drm_intel_bufmgr_gem_set_aub_annotations ( drm_intel_bo *  bo,
drm_intel_aub_annotation *  annotations,
unsigned  count 
)

Annotate the given bo for use in aub dumping.

Parameters:
annotations: an array of drm_intel_aub_annotation objects describing the type of data in various sections of the bo. Each element of the array specifies the type and subtype of a section of the bo, and the past-the-end offset of that section. The elements of annotations must be sorted so that ending_offset is increasing.
count: the number of elements in the annotations array. If count is zero, then annotations will not be dereferenced.

Annotations are copied into a private data structure, so caller may re-use the memory pointed to by annotations after the call returns.

Annotations are stored for the lifetime of the bo; to reset to the default state (no annotations), call this function with a count of zero.
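
A sketch of annotating a BO in two sections; TYPE_A and TYPE_B stand in for real AUB trace type codes, which this header does not define:

       drm_intel_aub_annotation notes[2];

       notes[0].type = TYPE_A;
       notes[0].subtype = 0;
       notes[0].ending_offset = 256;                   /* first 256 bytes */

       notes[1].type = TYPE_B;
       notes[1].subtype = 0;
       notes[1].ending_offset = (uint32_t)bo->size;    /* rest of the BO */

       drm_intel_bufmgr_gem_set_aub_annotations(bo, notes, 2);

       /* Later, reset to the default state (no annotations): */
       drm_intel_bufmgr_gem_set_aub_annotations(bo, NULL, 0);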

Definition at line 2910 of file intel_bufmgr_gem.c.

{
       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
       unsigned size = sizeof(*annotations) * count;
       drm_intel_aub_annotation *new_annotations =
              count > 0 ? realloc(bo_gem->aub_annotations, size) : NULL;
       if (new_annotations == NULL) {
              free(bo_gem->aub_annotations);
              bo_gem->aub_annotations = NULL;
              bo_gem->aub_annotation_count = 0;
              return;
       }
       memcpy(new_annotations, annotations, size);
       bo_gem->aub_annotations = new_annotations;
       bo_gem->aub_annotation_count = count;
}

void drm_intel_bufmgr_gem_set_aub_dump ( drm_intel_bufmgr *  bufmgr,
int  enable 
)

Sets up AUB dumping.

This is a trace file format that can be used with the simulator. Packets are emitted in a format somewhat like GPU command packets. You can set up a GTT and upload your objects into the referenced space, then send off batchbuffers and get BMPs out the other end.
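
A typical capture session, sketched; the 640x480 ARGB8888 surface geometry is illustrative:

       drm_intel_bufmgr_gem_set_aub_dump(bufmgr, 1);   /* opens "intel.aub" */

       /* ... allocate BOs, emit relocations, exec batchbuffers ... */

       /* Request a BMP dump of the surface held in bo. */
       drm_intel_gem_bo_aub_dump_bmp(bo, 0, 0, 640, 480,
                                     AUB_DUMP_BMP_FORMAT_ARGB_8888,
                                     640 * 4 /* pitch */, 0 /* offset */);

       drm_intel_bufmgr_gem_set_aub_dump(bufmgr, 0);   /* close the file */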

Definition at line 2796 of file intel_bufmgr_gem.c.

{
       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
       int entry = 0x200003;
       int i;
       int gtt_size = 0x10000;

       if (!enable) {
              if (bufmgr_gem->aub_file) {
                     fclose(bufmgr_gem->aub_file);
                     bufmgr_gem->aub_file = NULL;
              }
              /* Nothing more to do when disabling dumping. */
              return;
       }

       if (geteuid() != getuid())
              return;

       bufmgr_gem->aub_file = fopen("intel.aub", "w+");
       if (!bufmgr_gem->aub_file)
              return;

       /* Start allocating objects from just after the GTT. */
       bufmgr_gem->aub_offset = gtt_size;

       /* Start with a (required) version packet. */
       aub_out(bufmgr_gem, CMD_AUB_HEADER | (13 - 2));
       aub_out(bufmgr_gem,
              (4 << AUB_HEADER_MAJOR_SHIFT) |
              (0 << AUB_HEADER_MINOR_SHIFT));
       for (i = 0; i < 8; i++) {
              aub_out(bufmgr_gem, 0); /* app name */
       }
       aub_out(bufmgr_gem, 0); /* timestamp */
       aub_out(bufmgr_gem, 0); /* timestamp */
       aub_out(bufmgr_gem, 0); /* comment len */

       /* Set up the GTT. The max we can handle is 256M */
       aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | (5 - 2));
       aub_out(bufmgr_gem, AUB_TRACE_MEMTYPE_NONLOCAL | 0 | AUB_TRACE_OP_DATA_WRITE);
       aub_out(bufmgr_gem, 0); /* subtype */
       aub_out(bufmgr_gem, 0); /* offset */
       aub_out(bufmgr_gem, gtt_size); /* size */
       for (i = 0x000; i < gtt_size; i += 4, entry += 0x1000) {
              aub_out(bufmgr_gem, entry);
       }
}

void drm_intel_bufmgr_gem_set_vma_cache_size ( drm_intel_bufmgr *  bufmgr,
int  limit 
)

Definition at line 2738 of file intel_bufmgr_gem.c.

{
       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

       bufmgr_gem->vma_max = limit;

       drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}

void drm_intel_bufmgr_set_debug ( drm_intel_bufmgr *  bufmgr,
int  enable_debug 
)

Definition at line 158 of file intel_bufmgr.c.

{
       bufmgr->debug = enable_debug;
}
void drm_intel_decode ( struct drm_intel_decode *  ctx)

Decodes an i830-i915 batch buffer, writing the output to stdout.

Parameters:
data: batch buffer contents
count: number of DWORDs to decode in the batch buffer
hw_offset: hardware address for the buffer
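
Putting the decoder together, a sketch; "devid", "batch_data", "gtt_offset", and "dword_count" are assumed to come from the caller:

       struct drm_intel_decode *dctx;

       dctx = drm_intel_decode_context_alloc(devid);
       if (dctx) {
              drm_intel_decode_set_batch_pointer(dctx, batch_data,
                                                 gtt_offset, dword_count);
              drm_intel_decode_set_output_file(dctx, stderr); /* default is stdout */
              drm_intel_decode(dctx);
              drm_intel_decode_context_free(dctx);
       }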

Definition at line 3891 of file intel_decode.c.

{
       int ret;
       unsigned int index = 0;
       uint32_t devid;
       int size;
       void *temp;

       /* Check ctx before dereferencing it to compute the batch size. */
       if (!ctx)
              return;
       size = ctx->base_count * 4;

       /* Put a scratch page full of obviously undefined data after
        * the batchbuffer.  This lets us avoid a bunch of length
        * checking in statically sized packets.
        */
       temp = malloc(size + 4096);
       if (!temp)
              return;
       memcpy(temp, ctx->base_data, size);
       memset((char *)temp + size, 0xd0, 4096);
       ctx->data = temp;

       ctx->hw_offset = ctx->base_hw_offset;
       ctx->count = ctx->base_count;

       devid = ctx->devid;
       head_offset = ctx->head;
       tail_offset = ctx->tail;
       out = ctx->out;

       saved_s2_set = 0;
       saved_s4_set = 1;

       while (ctx->count > 0) {
              index = 0;

              switch ((ctx->data[index] & 0xe0000000) >> 29) {
              case 0x0:
                     ret = decode_mi(ctx);

                     /* If MI_BATCHBUFFER_END happened, then dump
                      * the rest of the output in case we some day
                      * want it in debugging, but don't decode it
                      * since it'll just confuse in the common
                      * case.
                      */
                     if (ret == -1) {
                            if (ctx->dump_past_end) {
                                   index++;
                            } else {
                                   for (index = index + 1; index < ctx->count;
                                        index++) {
                                          instr_out(ctx, index, "\n");
                                   }
                            }
                     } else
                            index += ret;
                     break;
              case 0x2:
                     index += decode_2d(ctx);
                     break;
              case 0x3:
                     if (IS_9XX(devid) && !IS_GEN3(devid)) {
                            index +=
                                decode_3d_965(ctx);
                     } else if (IS_GEN3(devid)) {
                            index += decode_3d(ctx);
                     } else {
                            index +=
                                decode_3d_i830(ctx);
                     }
                     break;
              default:
                     instr_out(ctx, index, "UNKNOWN\n");
                     index++;
                     break;
              }
              fflush(out);

              if (ctx->count < index)
                     break;

              ctx->count -= index;
              ctx->data += index;
              ctx->hw_offset += 4 * index;
       }

       free(temp);
}

struct drm_intel_decode * drm_intel_decode_context_alloc ( uint32_t  devid)

Definition at line 3817 of file intel_decode.c.

{
       struct drm_intel_decode *ctx;

       ctx = calloc(1, sizeof(struct drm_intel_decode));
       if (!ctx)
              return NULL;

       ctx->devid = devid;
       ctx->out = stdout;

       if (IS_GEN7(devid))
              ctx->gen = 7;
       else if (IS_GEN6(devid))
              ctx->gen = 6;
       else if (IS_GEN5(devid))
              ctx->gen = 5;
       else if (IS_GEN4(devid))
              ctx->gen = 4;
       else if (IS_9XX(devid))
              ctx->gen = 3;
       else {
              assert(IS_GEN2(devid));
              ctx->gen = 2;
       }

       return ctx;
}

void drm_intel_decode_context_free ( struct drm_intel_decode *  ctx)

Definition at line 3847 of file intel_decode.c.

{
       free(ctx);
}

void drm_intel_decode_set_batch_pointer ( struct drm_intel_decode *  ctx,
void *  data,
uint32_t  hw_offset,
int  count 
)

Definition at line 3860 of file intel_decode.c.

{
       ctx->base_data = data;
       ctx->base_hw_offset = hw_offset;
       ctx->base_count = count;
}

void drm_intel_decode_set_dump_past_end ( struct drm_intel_decode *  ctx,
int  dump_past_end 
)

Definition at line 3853 of file intel_decode.c.

{
       ctx->dump_past_end = dump_past_end;
}

void drm_intel_decode_set_head_tail ( struct drm_intel_decode *  ctx,
uint32_t  head,
uint32_t  tail 
)

Definition at line 3869 of file intel_decode.c.

{
       ctx->head = head;
       ctx->tail = tail;
}
void drm_intel_decode_set_output_file ( struct drm_intel_decode *  ctx,
FILE *  out 
)

Definition at line 3877 of file intel_decode.c.

{
       ctx->out = out;
}

void drm_intel_gem_bo_aub_dump_bmp ( drm_intel_bo *  bo,
int  x1,
int  y1,
int  width,
int  height,
enum aub_dump_bmp_format  format,
int  pitch,
int  offset 
)

Definition at line 2036 of file intel_bufmgr_gem.c.

{
       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
       uint32_t cpp;

       switch (format) {
       case AUB_DUMP_BMP_FORMAT_8BIT:
              cpp = 1;
              break;
       case AUB_DUMP_BMP_FORMAT_ARGB_4444:
              cpp = 2;
              break;
       case AUB_DUMP_BMP_FORMAT_ARGB_0888:
       case AUB_DUMP_BMP_FORMAT_ARGB_8888:
              cpp = 4;
              break;
       default:
              printf("Unknown AUB dump format %d\n", format);
              return;
       }

       if (!bufmgr_gem->aub_file)
              return;

       aub_out(bufmgr_gem, CMD_AUB_DUMP_BMP | 4);
       aub_out(bufmgr_gem, (y1 << 16) | x1);
       aub_out(bufmgr_gem,
              (format << 24) |
              (cpp << 19) |
              pitch / 4);
       aub_out(bufmgr_gem, (height << 16) | width);
       aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
       aub_out(bufmgr_gem,
              ((bo_gem->tiling_mode != I915_TILING_NONE) ? (1 << 2) : 0) |
              ((bo_gem->tiling_mode == I915_TILING_Y) ? (1 << 3) : 0));
}

void drm_intel_gem_bo_clear_relocs ( drm_intel_bo *  bo,
int  start 
)

Removes existing relocation entries in the BO after "start".

This allows a user to avoid a two-step process for state setup with counting up all the buffer objects and doing a drm_intel_bufmgr_check_aperture_space() before emitting any of the relocations for the state setup. Instead, save the state of the batchbuffer including drm_intel_gem_get_reloc_count(), emit all the state, and then check if it still fits in the aperture.

Any further drm_intel_bufmgr_check_aperture_space() queries involving this buffer in the tree are undefined after this call.
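
A sketch of the checkpoint/rollback pattern described above; emit_state() and flush_batch() are hypothetical driver helpers:

       int saved = drm_intel_gem_bo_get_reloc_count(batch_bo);

       emit_state(batch_bo);  /* hypothetical: emits relocations for new state */

       if (drm_intel_bufmgr_check_aperture_space(&batch_bo, 1) != 0) {
              /* The state didn't fit: drop the just-emitted relocations
               * and retry in a fresh batch. */
              drm_intel_gem_bo_clear_relocs(batch_bo, saved);
              flush_batch();  /* hypothetical: submit and start a new batch */
       }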

Definition at line 1722 of file intel_bufmgr_gem.c.

{
       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
       int i;
       struct timespec time;

       clock_gettime(CLOCK_MONOTONIC, &time);

       assert(bo_gem->reloc_count >= start);
       /* Unreference the cleared target buffers */
       for (i = start; i < bo_gem->reloc_count; i++) {
              if (bo_gem->reloc_target_info[i].bo != bo) {
                     drm_intel_gem_bo_unreference_locked_timed(bo_gem->
                                                          reloc_target_info[i].bo,
                                                          time.tv_sec);
              }
       }
       bo_gem->reloc_count = start;
}

int drm_intel_gem_bo_context_exec ( drm_intel_bo *  bo,
drm_intel_context *  ctx,
int  used,
unsigned int  flags 
)

Definition at line 2299 of file intel_bufmgr_gem.c.

{
       return do_exec2(bo, used, ctx, NULL, 0, 0, flags);
}

int drm_intel_gem_bo_get_reloc_count ( drm_intel_bo *  bo)

Definition at line 1701 of file intel_bufmgr_gem.c.

{
       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

       return bo_gem->reloc_count;
}
int drm_intel_gem_bo_map_gtt ( drm_intel_bo *  bo)

Definition at line 1259 of file intel_bufmgr_gem.c.

{
       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
       struct drm_i915_gem_set_domain set_domain;
       int ret;

       pthread_mutex_lock(&bufmgr_gem->lock);

       ret = map_gtt(bo);
       if (ret) {
              pthread_mutex_unlock(&bufmgr_gem->lock);
              return ret;
       }

       /* Now move it to the GTT domain so that the GPU and CPU
        * caches are flushed and the GPU isn't actively using the
        * buffer.
        *
        * The pagefault handler does this domain change for us when
        * it has unbound the BO from the GTT, but it's up to us to
        * tell it when we're about to use things if we had done
        * rendering and it still happens to be bound to the GTT.
        */
       VG_CLEAR(set_domain);
       set_domain.handle = bo_gem->gem_handle;
       set_domain.read_domains = I915_GEM_DOMAIN_GTT;
       set_domain.write_domain = I915_GEM_DOMAIN_GTT;
       ret = drmIoctl(bufmgr_gem->fd,
                     DRM_IOCTL_I915_GEM_SET_DOMAIN,
                     &set_domain);
       if (ret != 0) {
              DBG("%s:%d: Error setting domain %d: %s\n",
                  __FILE__, __LINE__, bo_gem->gem_handle,
                  strerror(errno));
       }

       drm_intel_gem_bo_mark_mmaps_incoherent(bo);
       VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
       pthread_mutex_unlock(&bufmgr_gem->lock);

       return 0;
}

int drm_intel_gem_bo_map_unsynchronized ( drm_intel_bo *  bo)

Performs a mapping of the buffer object like the normal GTT mapping, but avoids waiting for the GPU to be done reading from or rendering to the buffer.

This is used in the implementation of GL_ARB_map_buffer_range: The user asks to create a buffer, then does a mapping, fills some space, runs a drawing command, then asks to map it again without synchronizing because it guarantees that it won't write over the data that the GPU is busy using (or, more specifically, that if it does write over the data, it acknowledges that rendering is undefined).
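
A sketch of that pattern; "offset", "src", and "len" describe a region the caller guarantees the GPU is not currently using:

       if (drm_intel_gem_bo_map_unsynchronized(bo) == 0) {
              memcpy((char *)bo->virtual + offset, src, len);
              drm_intel_gem_bo_unmap_gtt(bo);
       }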

Definition at line 1317 of file intel_bufmgr_gem.c.

{
       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
       int ret;

       /* If the CPU cache isn't coherent with the GTT, then use a
        * regular synchronized mapping.  The problem is that we don't
        * track where the buffer was last used on the CPU side in
        * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
        * we would potentially corrupt the buffer even when the user
        * does reasonable things.
        */
       if (!bufmgr_gem->has_llc)
              return drm_intel_gem_bo_map_gtt(bo);

       pthread_mutex_lock(&bufmgr_gem->lock);
       ret = map_gtt(bo);
       pthread_mutex_unlock(&bufmgr_gem->lock);

       return ret;
}

void drm_intel_gem_bo_start_gtt_access ( drm_intel_bo *  bo,
int  write_enable 
)

Sets the object to the GTT read and possibly write domain, used by the X 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().

In combination with drm_intel_gem_bo_pin() and manual fence management, we can do tiled pixmaps this way.

Definition at line 1542 of file intel_bufmgr_gem.c.

{
       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
       struct drm_i915_gem_set_domain set_domain;
       int ret;

       VG_CLEAR(set_domain);
       set_domain.handle = bo_gem->gem_handle;
       set_domain.read_domains = I915_GEM_DOMAIN_GTT;
       set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
       ret = drmIoctl(bufmgr_gem->fd,
                     DRM_IOCTL_I915_GEM_SET_DOMAIN,
                     &set_domain);
       if (ret != 0) {
              DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
                  __FILE__, __LINE__, bo_gem->gem_handle,
                  set_domain.read_domains, set_domain.write_domain,
                  strerror(errno));
       }
}

int drm_intel_gem_bo_unmap_gtt ( drm_intel_bo *  bo)

Definition at line 1391 of file intel_bufmgr_gem.c.

{
       return drm_intel_gem_bo_unmap(bo);
}

int drm_intel_gem_bo_wait ( drm_intel_bo *  bo,
int64_t  timeout_ns 
)

Waits on a BO for the given amount of time.

Parameters:
bo: buffer object to wait for
timeout_ns: amount of time to wait in nanoseconds. If the value is less than 0, an infinite wait will occur.

Returns 0 if the wait was successful, i.e. the last batch referencing the object has completed within the allotted time. Otherwise some negative return value describes the error. Of particular interest is -ETIME when the wait has failed to yield the desired result.

Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter allows the operation to give up after a certain amount of time. Another subtle difference is the internal locking semantics are different (this variant does not hold the lock for the duration of the wait). This makes the wait subject to a larger userspace race window.

The implementation shall wait until the object is no longer actively referenced within a batch buffer at the time of the call. The wait does not guard against the buffer being re-issued via another thread or a flinked handle. Userspace must make sure this race does not occur if such precision is important.
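
A sketch of a bounded wait:

       int ret = drm_intel_gem_bo_wait(bo, 1000000000LL);  /* up to 1 second */
       if (ret == -ETIME) {
              /* The GPU is still referencing the buffer; give up or retry. */
       } else if (ret < 0) {
              /* Some other error occurred. */
       } else {
              /* The last batch referencing bo has completed. */
       }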

Definition at line 1506 of file intel_bufmgr_gem.c.

{
       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
       struct drm_i915_gem_wait wait;
       int ret;

       if (!bufmgr_gem->has_wait_timeout) {
              DBG("%s:%d: Timed wait is not supported. Falling back to "
                  "infinite wait\n", __FILE__, __LINE__);
              if (timeout_ns) {
                     drm_intel_gem_bo_wait_rendering(bo);
                     return 0;
              } else {
                     return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
              }
       }

       wait.bo_handle = bo_gem->gem_handle;
       wait.timeout_ns = timeout_ns;
       wait.flags = 0;
       ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
       if (ret == -1)
              return -errno;

       return ret;
}

drm_intel_context* drm_intel_gem_context_create ( drm_intel_bufmgr *  bufmgr)

Definition at line 2844 of file intel_bufmgr_gem.c.

{
       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
       struct drm_i915_gem_context_create create;
       drm_intel_context *context = NULL;
       int ret;

       ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
       if (ret != 0) {
              fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
                     strerror(errno));
              return NULL;
       }

       context = calloc(1, sizeof(*context));
       if (!context)
              return NULL;
       context->ctx_id = create.ctx_id;
       context->bufmgr = bufmgr;

       return context;
}

void drm_intel_gem_context_destroy ( drm_intel_context *  ctx)

Definition at line 2867 of file intel_bufmgr_gem.c.

{
       drm_intel_bufmgr_gem *bufmgr_gem;
       struct drm_i915_gem_context_destroy destroy;
       int ret;

       if (ctx == NULL)
              return;

       bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
       destroy.ctx_id = ctx->ctx_id;
       ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
                     &destroy);
       if (ret != 0)
              fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
                     strerror(errno));

       free(ctx);
}

int drm_intel_get_aperture_sizes ( int  fd,
size_t *  mappable,
size_t *  total 
)

Definition at line 301 of file intel_bufmgr.c.

{

       struct drm_i915_gem_get_aperture aperture;
       int ret;

       ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
       if (ret)
              return ret;

       *mappable = 0;
       /* XXX add a query for the kernel value? */
       if (*mappable == 0)
              *mappable = drm_intel_probe_agp_aperture_size(fd);
       if (*mappable == 0)
              *mappable = 64 * 1024 * 1024; /* minimum possible value */
       *total = aperture.aper_size;
       return 0;
}

int drm_intel_get_pipe_from_crtc_id ( drm_intel_bufmgr *  bufmgr,
int  crtc_id 
)

Definition at line 268 of file intel_bufmgr.c.

{
       if (bufmgr->get_pipe_from_crtc_id)
              return bufmgr->get_pipe_from_crtc_id(bufmgr, crtc_id);
       return -1;
}