// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the command buffer helper class.

#ifndef GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_
#define GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_

#include <string.h>

#include "base/basictypes.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/time/time.h"
#include "base/trace_event/memory_dump_provider.h"
#include "build/build_config.h"
#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "gpu/gpu_export.h"

namespace gpu {

class Buffer;

#if !defined(OS_ANDROID)
#define CMD_HELPER_PERIODIC_FLUSH_CHECK
const int kCommandsPerFlushCheck = 100;
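// One periodic flush every 1/(5 * 60) of a second, i.e. roughly every 3.3 ms.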
const int kPeriodicFlushDelayInMicroseconds =
    base::Time::kMicrosecondsPerSecond / (5 * 60);
#endif

const int kAutoFlushSmall = 16;  // 1/16 of the buffer
const int kAutoFlushBig = 2;     // 1/2 of the buffer

// Command buffer helper class. This class simplifies ring buffer management:
// it will allocate the buffer, give it to the buffer interface, and let the
// user add commands to it, while taking care of the synchronization (put and
// get). It also provides a way to ensure commands have been executed, through
// the token mechanism:
//
// helper.AddCommand(...);
// helper.AddCommand(...);
// int32 token = helper.InsertToken();
// helper.AddCommand(...);
// helper.AddCommand(...);
// [...]
//
// helper.WaitForToken(token);  // this doesn't return until the first two
//                              // commands have been executed.
class GPU_EXPORT CommandBufferHelper
    : public base::trace_event::MemoryDumpProvider {
 public:
  explicit CommandBufferHelper(CommandBuffer* command_buffer);
  ~CommandBufferHelper() override;

  // Initializes the CommandBufferHelper.
  // Parameters:
  //   ring_buffer_size: The size of the ring buffer portion of the command
  //       buffer.
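  //
  // A minimal setup sketch (illustrative only; assumes a |command_buffer|
  // created and initialized elsewhere):
  //
  //   CommandBufferHelper helper(command_buffer);
  //   if (!helper.Initialize(1024 * 1024))  // 1 MB ring buffer
  //     return false;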
  bool Initialize(int32 ring_buffer_size);

  // Sets whether the command buffer should automatically flush periodically
  // to try to increase performance. Defaults to true.
  void SetAutomaticFlushes(bool enabled);

  // True if the context is lost.
  bool IsContextLost();

  // Asynchronously flushes the commands, setting the put pointer to let the
  // buffer interface know that new commands have been added. After a flush
  // returns, the command buffer service is aware of all pending commands.
  void Flush();

  // Ensures that commands up to the put pointer will be processed in the
  // command buffer service before any future commands on other command buffers
  // sharing a channel.
  void OrderingBarrier();

  // Waits until all the commands have been executed. Returns whether it
  // was successful. The function will fail if the command buffer service has
  // disconnected.
  bool Finish();

  // Waits until the given number of entries are available.
  // Parameters:
  //   count: number of entries needed. This value must be at most
  //     the size of the buffer minus one.
  void WaitForAvailableEntries(int32 count);

  // Inserts a new token into the command buffer. This token either has a value
  // different from previously inserted tokens, or ensures that previously
  // inserted tokens with that value have already passed through the command
  // stream.
  // Returns:
  //   the value of the new token or -1 if the command buffer reader has
  //   shut down.
  int32 InsertToken();

  // Returns true if the token has passed.
  // Parameters:
  //   token: the value of the token to check.
  bool HasTokenPassed(int32 token) const {
    if (token > token_)
      return true;  // Tokens wrapped; |token| predates the wrap, so it passed.
    return last_token_read() >= token;
  }

  // Waits until the token of a particular value has passed through the command
  // stream (i.e. commands inserted before that token have been executed).
  // NOTE: This will call Flush if it needs to block.
  // Parameters:
  //   token: the value of the token to wait for.
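  //
  // Example (illustrative): recycling a transfer buffer once the service has
  // consumed every command that references it. IssueCommandsReferencing() is
  // a hypothetical caller-side function, not part of this class:
  //
  //   IssueCommandsReferencing(buffer);
  //   int32 token = helper.InsertToken();
  //   ...
  //   helper.WaitForToken(token);  // |buffer| may be reused after this.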
  void WaitForToken(int32 token);

  // Called prior to each command being issued. Waits for a certain amount of
  // space to be available. Returns the address of the space, or NULL if the
  // wait failed (e.g. because the context was lost).
  void* GetSpace(int32 entries) {
#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
    // Allow this command buffer to be pre-empted by another if a "reasonable"
    // amount of work has been done. On high-end machines, this reduces the
    // latency of GPU commands. However, on Android, this can cause the
    // kernel to thrash between generating GPU commands and executing them.
    ++commands_issued_;
    if (flush_automatically_ &&
        (commands_issued_ % kCommandsPerFlushCheck == 0)) {
      PeriodicFlushCheck();
    }
#endif

    // Test for immediate entries.
    if (entries > immediate_entry_count_) {
      WaitForAvailableEntries(entries);
      if (entries > immediate_entry_count_)
        return NULL;
    }

    DCHECK_LE(entries, immediate_entry_count_);

    // Allocate space and advance put_.
    CommandBufferEntry* space = &entries_[put_];
    put_ += entries;
    immediate_entry_count_ -= entries;

    DCHECK_LE(put_, total_entry_count_);
    return space;
  }

  template <typename T>
  void ForceNullCheck(T* data) {
#if defined(COMPILER_MSVC) && defined(ARCH_CPU_64_BITS) && !defined(__clang__)
    // 64-bit MSVC's alias analysis was determining that the command buffer
    // entry couldn't be NULL, so it optimized out the NULL check.
    // Dereferencing the same datatype through a volatile pointer seems to
    // prevent that from happening. http://crbug.com/361936
    // TODO(jbauman): Remove once we're on VC2015, http://crbug.com/412902
    if (data)
      static_cast<volatile T*>(data)->header;
#endif
  }

  // Typed version of GetSpace. Gets enough room for the given type and returns
  // a pointer to it, which may be NULL.
  template <typename T>
  T* GetCmdSpace() {
    static_assert(T::kArgFlags == cmd::kFixed,
                  "T::kArgFlags should equal cmd::kFixed");
    int32 space_needed = ComputeNumEntries(sizeof(T));
    T* data = static_cast<T*>(GetSpace(space_needed));
    ForceNullCheck(data);
    return data;
  }

  // Typed version of GetSpace for immediate commands. |data_space| is the size
  // of the variable-length payload that follows the fixed command struct.
  template <typename T>
  T* GetImmediateCmdSpace(size_t data_space) {
    static_assert(T::kArgFlags == cmd::kAtLeastN,
                  "T::kArgFlags should equal cmd::kAtLeastN");
    int32 space_needed = ComputeNumEntries(sizeof(T) + data_space);
    T* data = static_cast<T*>(GetSpace(space_needed));
    ForceNullCheck(data);
    return data;
  }

  // Typed version of GetSpace for immediate commands, where |total_space| is
  // the total command size, including the fixed command struct itself.
  template <typename T>
  T* GetImmediateCmdSpaceTotalSize(size_t total_space) {
    static_assert(T::kArgFlags == cmd::kAtLeastN,
                  "T::kArgFlags should equal cmd::kAtLeastN");
    int32 space_needed = ComputeNumEntries(total_space);
    T* data = static_cast<T*>(GetSpace(space_needed));
    ForceNullCheck(data);
    return data;
  }

  int32 last_token_read() const {
    return command_buffer_->GetLastToken();
  }

  int32 get_offset() const {
    return command_buffer_->GetLastState().get_offset;
  }

  // Common Commands
  void Noop(uint32 skip_count) {
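    // skip_count counts the Noop's own header entry, so only (skip_count - 1)
    // additional entries of immediate data are requested here.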
    cmd::Noop* cmd = GetImmediateCmdSpace<cmd::Noop>(
        (skip_count - 1) * sizeof(CommandBufferEntry));
    if (cmd) {
      cmd->Init(skip_count);
    }
  }

  void SetToken(uint32 token) {
    cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
    if (cmd) {
      cmd->Init(token);
    }
  }

  void SetBucketSize(uint32 bucket_id, uint32 size) {
    cmd::SetBucketSize* cmd = GetCmdSpace<cmd::SetBucketSize>();
    if (cmd) {
      cmd->Init(bucket_id, size);
    }
  }

  void SetBucketData(uint32 bucket_id,
                     uint32 offset,
                     uint32 size,
                     uint32 shared_memory_id,
                     uint32 shared_memory_offset) {
    cmd::SetBucketData* cmd = GetCmdSpace<cmd::SetBucketData>();
    if (cmd) {
      cmd->Init(bucket_id,
                offset,
                size,
                shared_memory_id,
                shared_memory_offset);
    }
  }

  void SetBucketDataImmediate(
      uint32 bucket_id, uint32 offset, const void* data, uint32 size) {
    cmd::SetBucketDataImmediate* cmd =
        GetImmediateCmdSpace<cmd::SetBucketDataImmediate>(size);
    if (cmd) {
      cmd->Init(bucket_id, offset, size);
      memcpy(ImmediateDataAddress(cmd), data, size);
    }
  }

  void GetBucketStart(uint32 bucket_id,
                      uint32 result_memory_id,
                      uint32 result_memory_offset,
                      uint32 data_memory_size,
                      uint32 data_memory_id,
                      uint32 data_memory_offset) {
    cmd::GetBucketStart* cmd = GetCmdSpace<cmd::GetBucketStart>();
    if (cmd) {
      cmd->Init(bucket_id,
                result_memory_id,
                result_memory_offset,
                data_memory_size,
                data_memory_id,
                data_memory_offset);
    }
  }

  void GetBucketData(uint32 bucket_id,
                     uint32 offset,
                     uint32 size,
                     uint32 shared_memory_id,
                     uint32 shared_memory_offset) {
    cmd::GetBucketData* cmd = GetCmdSpace<cmd::GetBucketData>();
    if (cmd) {
      cmd->Init(bucket_id,
                offset,
                size,
                shared_memory_id,
                shared_memory_offset);
    }
  }

  CommandBuffer* command_buffer() const {
    return command_buffer_;
  }

  scoped_refptr<Buffer> get_ring_buffer() const { return ring_buffer_; }

  uint32 flush_generation() const { return flush_generation_; }

  void FreeRingBuffer();

  bool HaveRingBuffer() const {
    return ring_buffer_id_ != -1;
  }

  bool usable() const {
    return usable_;
  }

  void ClearUsable() {
    usable_ = false;
    context_lost_ = true;
    CalcImmediateEntries(0);
  }

  // Overridden from base::trace_event::MemoryDumpProvider:
  bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
                    base::trace_event::ProcessMemoryDump* pmd) override;

 private:
  // Returns the number of available entries (they may not be contiguous).
  int32 AvailableEntries() {
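    // Classic ring-buffer free-space computation; the -1 keeps one entry
    // unused so a full buffer is distinguishable from an empty one. E.g. with
    // total_entry_count_ = 1024, put_ = 10 and get_offset() = 5, this yields
    // (5 - 10 - 1 + 1024) % 1024 = 1018 free entries.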
    return (get_offset() - put_ - 1 + total_entry_count_) % total_entry_count_;
  }

  void CalcImmediateEntries(int waiting_count);
  bool AllocateRingBuffer();
  void FreeResources();

  // Waits for the get offset to be in a specific range, inclusive. Returns
  // false if there was an error.
  bool WaitForGetOffsetInRange(int32 start, int32 end);

#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
  // Calls Flush if automatic flush conditions are met.
  void PeriodicFlushCheck();
#endif

  int32 GetTotalFreeEntriesNoWaiting() const;

  CommandBuffer* command_buffer_;
  int32 ring_buffer_id_;
  int32 ring_buffer_size_;
  scoped_refptr<gpu::Buffer> ring_buffer_;
  CommandBufferEntry* entries_;
  int32 total_entry_count_;  // the total number of entries
  int32 immediate_entry_count_;
  int32 token_;
  int32 put_;
  int32 last_put_sent_;
  int32 last_barrier_put_sent_;

#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
  int commands_issued_;
#endif

  bool usable_;
  bool context_lost_;
  bool flush_automatically_;

  base::TimeTicks last_flush_time_;

  // Incremented every time the helper flushes the command buffer.
  // Can be used to track when prior commands have been flushed.
  uint32 flush_generation_;

  friend class CommandBufferHelperTest;
  DISALLOW_COPY_AND_ASSIGN(CommandBufferHelper);
};

}  // namespace gpu

#endif  // GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_