path: root/chromium/gpu/command_buffer/service/scheduler.h
blob: a2a6c5d2e3faf9d58d9ea0a1f69a77b95f2cb926 (plain)
// Copyright (c) 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef GPU_COMMAND_BUFFER_SERVICE_SCHEDULER_H_
#define GPU_COMMAND_BUFFER_SERVICE_SCHEDULER_H_

#include <stdint.h>

#include <memory>
#include <queue>
#include <tuple>
#include <vector>

#include "base/callback.h"
#include "base/containers/circular_deque.h"
#include "base/containers/flat_map.h"
#include "base/containers/flat_set.h"
#include "base/gtest_prod_util.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
#include "gpu/command_buffer/common/command_buffer_id.h"
#include "gpu/command_buffer/common/scheduling_priority.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "gpu/command_buffer/service/sequence_id.h"
#include "gpu/gpu_export.h"

namespace base {
class SingleThreadTaskRunner;
namespace trace_event {
class ConvertableToTraceFormat;
}
}

namespace gpu {
class SyncPointManager;
class SyncPointOrderData;

class GPU_EXPORT Scheduler {
 public:
  struct GPU_EXPORT Task {
    Task(SequenceId sequence_id,
         base::OnceClosure closure,
         std::vector<SyncToken> sync_token_fences);
    Task(Task&& other);
    ~Task();
    Task& operator=(Task&& other);

    SequenceId sequence_id;
    base::OnceClosure closure;
    std::vector<SyncToken> sync_token_fences;
  };

  Scheduler(scoped_refptr<base::SingleThreadTaskRunner> task_runner,
            SyncPointManager* sync_point_manager);

  virtual ~Scheduler();
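
  // Example usage (a sketch only, not part of the API; assumes a GPU-thread
  // |task_runner| and a |sync_point_manager| that outlive the scheduler, and
  // uses SchedulingPriority::kNormal as an illustrative priority):
  //
  //   Scheduler scheduler(std::move(task_runner), sync_point_manager);
  //   SequenceId sequence_id =
  //       scheduler.CreateSequence(SchedulingPriority::kNormal);
  //   ... schedule tasks on |sequence_id| ...
  //   scheduler.DestroySequence(sequence_id);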

  // Creates a sequence with the given priority. Returns an identifier for the
  // sequence that can be used with SyncPointManager for creating sync point
  // release clients. Sequences start off as enabled (see |EnableSequence|).
  // A sequence may be created off the GPU thread.
  SequenceId CreateSequence(SchedulingPriority priority);

  // Destroys the sequence and runs any scheduled tasks immediately. A
  // sequence may be destroyed off the GPU thread.
  void DestroySequence(SequenceId sequence_id);

  // Enables the sequence so that its tasks may be scheduled.
  void EnableSequence(SequenceId sequence_id);

  // Disables the sequence.
  void DisableSequence(SequenceId sequence_id);

  // Raises the priority of the sequence for a client wait
  // (WaitForGetOffset/TokenInRange) on the given command buffer.
  void RaisePriorityForClientWait(SequenceId sequence_id,
                                  CommandBufferId command_buffer_id);

  // Resets the priority of the sequence if it was raised for a client wait.
  void ResetPriorityForClientWait(SequenceId sequence_id,
                                  CommandBufferId command_buffer_id);

  // Schedules a task (closure) to run on the sequence. The task is blocked
  // until its sync token fences are released or determined to be invalid.
  // Tasks run in the order in which they are submitted.
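  //
  // For example (a sketch; |sequence_id| comes from CreateSequence(),
  // |sync_token| is a fence the task must wait on, and OnFenceReleased is a
  // hypothetical callback):
  //
  //   scheduler->ScheduleTask(Scheduler::Task(
  //       sequence_id, base::BindOnce(&OnFenceReleased), {sync_token}));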
  void ScheduleTask(Task task);

  // Schedules a batch of tasks; equivalent to calling ScheduleTask() for each
  // task in order.
  void ScheduleTasks(std::vector<Task> tasks);

  // Continues running the current task on the sequence with the given
  // closure. Must be called while running a previously scheduled task.
  void ContinueTask(SequenceId sequence_id, base::OnceClosure closure);

  // Returns true if the sequence should yield so that a higher priority
  // sequence may run.
  bool ShouldYield(SequenceId sequence_id);
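  //
  // For example, a long-running task may cooperatively yield and reschedule
  // its remaining work (a sketch; DoChunkedWork, HasMoreWork and DoSomeWork
  // are hypothetical helpers):
  //
  //   void DoChunkedWork(Scheduler* scheduler, SequenceId sequence_id) {
  //     while (HasMoreWork()) {
  //       DoSomeWork();
  //       if (scheduler->ShouldYield(sequence_id)) {
  //         scheduler->ContinueTask(
  //             sequence_id,
  //             base::BindOnce(&DoChunkedWork, scheduler, sequence_id));
  //         return;  // The rest runs later as a continuation of this task.
  //       }
  //     }
  //   }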

  base::WeakPtr<Scheduler> AsWeakPtr();

 private:
  struct SchedulingState {
    static bool Comparator(const SchedulingState& lhs,
                           const SchedulingState& rhs) {
      return rhs.RunsBefore(lhs);
    }

    SchedulingState();
    SchedulingState(const SchedulingState& other);
    ~SchedulingState();

    bool RunsBefore(const SchedulingState& other) const {
      return std::tie(priority, order_num) <
             std::tie(other.priority, other.order_num);
    }

    std::unique_ptr<base::trace_event::ConvertableToTraceFormat> AsValue()
        const;

    SequenceId sequence_id;
    SchedulingPriority priority = SchedulingPriority::kLow;
    uint32_t order_num = 0;
  };

  class GPU_EXPORT Sequence {
   public:
    Sequence(Scheduler* scheduler,
             SequenceId sequence_id,
             SchedulingPriority priority,
             scoped_refptr<SyncPointOrderData> order_data);

    ~Sequence();

    SequenceId sequence_id() const { return sequence_id_; }

    const scoped_refptr<SyncPointOrderData>& order_data() const {
      return order_data_;
    }

    bool enabled() const { return enabled_; }

    bool scheduled() const { return running_state_ == SCHEDULED; }

    bool running() const { return running_state_ == RUNNING; }

    // The sequence is runnable if it is enabled and has tasks that are not
    // blocked by wait fences.
    bool IsRunnable() const;

    // Returns true if this sequence's scheduling state changed and it needs to
    // be reinserted into the scheduling queue.
    bool NeedsRescheduling() const;

    // Returns true if this sequence should yield to another sequence. Uses the
    // cached scheduling state for comparison.
    bool ShouldYieldTo(const Sequence* other) const;

    // Enables or disables the sequence.
    void SetEnabled(bool enabled);

    // Sets running state to SCHEDULED. Returns scheduling state for this
    // sequence used for inserting in the scheduling queue.
    SchedulingState SetScheduled();

    // Update cached scheduling priority while running.
    void UpdateRunningPriority();

    // Returns the next order number and closure. Sets running state to RUNNING.
    uint32_t BeginTask(base::OnceClosure* closure);

    // Called after running the closure returned by BeginTask. Sets running
    // state to IDLE.
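    //
    // A sketch of the intended call pattern (illustrative only; the scheduler
    // drives this, e.g. from RunNextTask()):
    //
    //   base::OnceClosure closure;
    //   uint32_t order_num = sequence->BeginTask(&closure);
    //   // ... begin processing |order_num|, then run the closure ...
    //   std::move(closure).Run();
    //   sequence->FinishTask();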
    void FinishTask();

    // Enqueues a task in the sequence and returns the generated order number.
    uint32_t ScheduleTask(base::OnceClosure closure);

    // Continue running the current task with the given closure. Must be called
    // in between |BeginTask| and |FinishTask|.
    void ContinueTask(base::OnceClosure closure);

    // Add a sync token fence that this sequence should wait on.
    void AddWaitFence(const SyncToken& sync_token,
                      uint32_t order_num,
                      SequenceId release_sequence_id,
                      Sequence* release_sequence);

    // Remove a waiting sync token fence.
    void RemoveWaitFence(const SyncToken& sync_token,
                         uint32_t order_num,
                         SequenceId release_sequence_id);

    void AddClientWait(CommandBufferId command_buffer_id);

    void RemoveClientWait(CommandBufferId command_buffer_id);

    SchedulingPriority current_priority() const { return current_priority_; }

   private:
    enum RunningState { IDLE, SCHEDULED, RUNNING };

    struct WaitFence {
      WaitFence(WaitFence&& other);
      WaitFence(const SyncToken& sync_token,
                uint32_t order_num,
                SequenceId release_sequence_id);
      ~WaitFence();
      WaitFence& operator=(WaitFence&& other);

      SyncToken sync_token;
      uint32_t order_num;
      SequenceId release_sequence_id;

      bool operator==(const WaitFence& other) const {
        return std::tie(order_num, release_sequence_id, sync_token) ==
               std::tie(other.order_num, other.release_sequence_id,
                        other.sync_token);
      }

      bool operator<(const WaitFence& other) const {
        return std::tie(order_num, release_sequence_id, sync_token) <
               std::tie(other.order_num, other.release_sequence_id,
                        other.sync_token);
      }
    };

    struct Task {
      Task(Task&& other);
      Task(base::OnceClosure closure, uint32_t order_num);
      ~Task();
      Task& operator=(Task&& other);

      base::OnceClosure closure;
      uint32_t order_num;
    };

    // Description of Stream priority propagation: Each Stream has an initial
    // priority ('default_priority_').  When a Stream has other Streams waiting
    // on it via a 'WaitFence', it computes its own priority based on those
    // fences, by keeping a count of each incoming WaitFence's priority in
    // 'waiting_priority_counts_'.
    //
    // 'wait_fences_' maps each 'WaitFence' to its current priority. Initially
    // WaitFences take the priority of the waiting Stream, and propagate their
    // priority to the releasing Stream via AddWaitingPriority().
    //
    // A higher priority waiting Stream or ClientWait can recursively pass on
    // its priority to existing 'WaitFences' via PropagatePriority(), which
    // updates the releasing Stream via ChangeWaitingPriority().
    //
    // When a 'WaitFence' is removed, either because its SyncToken is released
    // or because the waiting Stream is destroyed, it removes its priority from
    // the releasing Stream via RemoveWaitingPriority().
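    //
    // For example (illustrative only): if a kLow Stream A will release a
    // SyncToken that a kHigh Stream B waits on, B's AddWaitFence() calls
    // AddWaitingPriority(kHigh) on A, so A is scheduled at kHigh; when the
    // token is released, RemoveWaitingPriority(kHigh) drops A back to its
    // default priority.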

    // Propagate a priority to all wait fences.
    void PropagatePriority(SchedulingPriority priority);

    // Add a waiting priority.
    void AddWaitingPriority(SchedulingPriority priority);

    // Remove a waiting priority.
    void RemoveWaitingPriority(SchedulingPriority priority);

    // Change a waiting priority.
    void ChangeWaitingPriority(SchedulingPriority old_priority,
                               SchedulingPriority new_priority);

    // Re-compute current priority.
    void UpdateSchedulingPriority();

    // Whether the sequence is enabled. Sequences are enabled/disabled when the
    // command buffer is scheduled/descheduled.
    bool enabled_ = true;

    RunningState running_state_ = IDLE;

    // Cached scheduling state used for comparison with other sequences while
    // running. Updated in |SetScheduled| and |UpdateRunningPriority|.
    SchedulingState scheduling_state_;

    Scheduler* const scheduler_;
    const SequenceId sequence_id_;

    const SchedulingPriority default_priority_;
    SchedulingPriority current_priority_;

    scoped_refptr<SyncPointOrderData> order_data_;

    // Deque of tasks. Tasks are inserted at the back with increasing order
    // number generated from SyncPointOrderData. If a running task needs to be
    // continued, it is inserted at the front with the same order number.
    base::circular_deque<Task> tasks_;

    // Map of fences that this sequence is waiting on. Fences are ordered by
    // increasing order number but may be removed out of order. Tasks are
    // blocked if there is a wait fence with an order number less than or equal
    // to the task's order number.
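    // For example, a task with order number 7 is blocked by a fence with order
    // number 5, but not by a fence with order number 9.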
    base::flat_map<WaitFence, SchedulingPriority> wait_fences_;

    // Counts of pending releases bucketed by scheduling priority.
    int waiting_priority_counts_[static_cast<int>(SchedulingPriority::kLast) +
                                 1] = {};

    base::flat_set<CommandBufferId> client_waits_;

    DISALLOW_COPY_AND_ASSIGN(Sequence);
  };

  void SyncTokenFenceReleased(const SyncToken& sync_token,
                              uint32_t order_num,
                              SequenceId release_sequence_id,
                              SequenceId waiting_sequence_id);

  void ScheduleTaskHelper(Task task);

  void TryScheduleSequence(Sequence* sequence);

  void RebuildSchedulingQueue();

  Sequence* GetSequence(SequenceId sequence_id);

  void RunNextTask();

  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;

  SyncPointManager* const sync_point_manager_;

  mutable base::Lock lock_;

  // The following are protected by |lock_|.
  bool running_ = false;

  base::flat_map<SequenceId, std::unique_ptr<Sequence>> sequences_;

  // Used as a priority queue for scheduling sequences. Min heap of
  // SchedulingState with highest priority (lowest order) in front.
  std::vector<SchedulingState> scheduling_queue_;
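
  // A sketch (illustrative only) of how SchedulingState::Comparator keeps the
  // state that runs first at the front when used with the std heap
  // algorithms:
  //
  //   std::push_heap(scheduling_queue_.begin(), scheduling_queue_.end(),
  //                  &SchedulingState::Comparator);
  //   ...
  //   std::pop_heap(scheduling_queue_.begin(), scheduling_queue_.end(),
  //                 &SchedulingState::Comparator);
  //   SchedulingState next = scheduling_queue_.back();  // Runs first.
  //   scheduling_queue_.pop_back();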

  // Whether the scheduling queue needs to be rebuilt because a sequence
  // changed priority.
  bool rebuild_scheduling_queue_ = false;

  base::ThreadChecker thread_checker_;

  // Invalidated on main thread.
  base::WeakPtr<Scheduler> weak_ptr_;
  base::WeakPtrFactory<Scheduler> weak_factory_{this};

 private:
  FRIEND_TEST_ALL_PREFIXES(SchedulerTest, StreamPriorities);
  FRIEND_TEST_ALL_PREFIXES(SchedulerTest, StreamDestroyRemovesPriorities);
  FRIEND_TEST_ALL_PREFIXES(SchedulerTest, StreamPriorityChangeWhileReleasing);
  FRIEND_TEST_ALL_PREFIXES(SchedulerTest, CircularPriorities);
  DISALLOW_COPY_AND_ASSIGN(Scheduler);
};

}  // namespace gpu

#endif  // GPU_COMMAND_BUFFER_SERVICE_SCHEDULER_H_