path: root/third_party/waf/wafadmin/3rdparty/batched_cc.py
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006 (ita)

"""
Batched builds - compile faster
Instead of compiling object files one by one, C/C++ compilers can often compile several at once:
cc -c ../file1.c ../file2.c ../file3.c

The object files are written to the directory where the compiler is invoked, and the dependencies
are harder to track (the command must not be re-run on all source files when only one of them changes)

As such, we pretend that the files are compiled one by one, but no command is actually run:
each cc/cxx task is turned into a slave, and a master task ('batch') collects the signatures
of its slaves and works out the command line to run.

To set this up, the extension hooks of cc/cxx are wrapped; to enable batched builds it is
only necessary to import this module in the configuration (no other change is required)
"""

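# A minimal usage sketch (a hypothetical project wscript, not part of this module):
# as the docstring above says, importing the module during configuration is enough;
# the tool name 'gcc' below is only an example.
#
#   def configure(conf):
#       conf.check_tool('gcc')
#       import batched_cc    # wraps the cc/cxx hooks, enabling batched compilation
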
MAX_BATCH = 50
MAXPARALLEL = False

EXT_C = ['.c', '.cc', '.cpp', '.cxx']

import os, threading
import TaskGen, Task, ccroot, Build, Logs
from TaskGen import extension, feature, after
from Constants import *

cc_str = '${CC} ${CCFLAGS} ${CPPFLAGS} ${_CCINCFLAGS} ${_CCDEFFLAGS} -c ${SRCLST}'
cc_fun = Task.compile_fun_noshell('batched_cc', cc_str)[0]

cxx_str = '${CXX} ${CXXFLAGS} ${CPPFLAGS} ${_CXXINCFLAGS} ${_CXXDEFFLAGS} -c ${SRCLST}'
cxx_fun = Task.compile_fun_noshell('batched_cxx', cxx_str)[0]

count = 70000 # global counter used to give each batch task a distinct idx
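# The master task: it is scheduled after its slaves (whose run() is a no-op, see below);
# when at least one slave was not skipped, it compiles the sources of all non-skipped
# slaves with a single compiler invocation and finalizes each of them via old_post_run().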
class batch_task(Task.Task):
	color = 'RED'

	after = 'cc cxx'
	before = 'cc_link cxx_link static_link'

	def __str__(self):
		return '(batch compilation for %d slaves)\n' % len(self.slaves)

	def __init__(self, *k, **kw):
		Task.Task.__init__(self, *k, **kw)
		self.slaves = []
		self.inputs = []
		self.hasrun = 0

		global count
		count += 1
		self.idx = count

	def add_slave(self, slave):
		self.slaves.append(slave)
		self.set_run_after(slave)

	def runnable_status(self):
		for t in self.run_after:
			if not t.hasrun:
				return ASK_LATER

		# run the batch only if at least one slave was not skipped
		for t in self.slaves:
			#if t.executed:
			if t.hasrun != SKIPPED:
				return RUN_ME

		return SKIP_ME

	def run(self):
		outputs = []
		self.outputs = []

		srclst = []
		slaves = []
		for t in self.slaves:
			if t.hasrun != SKIPPED:
				slaves.append(t)
				srclst.append(t.inputs[0].abspath(self.env))

		self.env.SRCLST = srclst
		self.cwd = slaves[0].inputs[0].parent.abspath(self.env)

		env = self.env
		app = env.append_unique
		cpppath_st = env['CPPPATH_ST']
		# reset the include flags; they are rebuilt below for the batch
		env._CCINCFLAGS = env._CXXINCFLAGS = []

		# local flags come first
		# set the user-defined include paths (each is added twice: the source directory and the corresponding build directory variant)
		for i in env['INC_PATHS']:
			app('_CCINCFLAGS', cpppath_st % i.abspath())
			app('_CXXINCFLAGS', cpppath_st % i.abspath())
			app('_CCINCFLAGS', cpppath_st % i.abspath(env))
			app('_CXXINCFLAGS', cpppath_st % i.abspath(env))

		# set the library include paths
		for i in env['CPPPATH']:
			app('_CCINCFLAGS', cpppath_st % i)
			app('_CXXINCFLAGS', cpppath_st % i)

		if self.slaves[0].__class__.__name__ == 'cc':
			ret = cc_fun(self)
		else:
			ret = cxx_fun(self)

		if ret:
			return ret

		for t in slaves:
			t.old_post_run()


import cc, cxx
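# Wrap the per-extension hooks of cc/cxx: the compile task created by the original
# hook is registered as a slave of a per-directory master ('batch') task; a fresh
# master is started for a directory once the current one holds more than MAX_BATCH slaves.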
def wrap(fun):
	def foo(self, node):
		# we cannot control the extension, this sucks
		self.obj_ext = '.o'

		task = fun(self, node)
		if not getattr(self, 'masters', None):
			self.masters = {}
			self.allmasters = []

		if node.parent.id not in self.masters:
			m = self.masters[node.parent.id] = self.master = self.create_task('batch')
			self.allmasters.append(m)
		else:
			m = self.masters[node.parent.id]
			if len(m.slaves) > MAX_BATCH:
				m = self.masters[node.parent.id] = self.master = self.create_task('batch')
				self.allmasters.append(m)

		m.add_slave(task)
		return task
	return foo

c_hook = wrap(cc.c_hook)
extension(cc.EXT_CC)(c_hook)

cxx_hook = wrap(cxx.cxx_hook)
extension(cxx.EXT_CXX)(cxx_hook)


@feature('cprogram', 'cshlib', 'cstaticlib')
@after('apply_link')
def link_after_masters(self):
	if getattr(self, 'allmasters', None):
		for m in self.allmasters:
			self.link_task.set_run_after(m)

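# Monkey-patch the original cc/cxx task classes: run() and post_run() become no-ops
# (the master performs the actual compilation and calls old_post_run() on each slave),
# and a slave whose result is retrieved from the cache removes itself from its masters.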
for c in ['cc', 'cxx']:
	t = Task.TaskBase.classes[c]
	def run(self):
		pass

	def post_run(self):
		#self.executed=1
		pass

	def can_retrieve_cache(self):
		if self.old_can_retrieve_cache():
			for m in self.generator.allmasters:
				try:
					m.slaves.remove(self)
				except ValueError:
					pass	#this task wasn't included in that master
			return 1
		else:
			return None

	setattr(t, 'oldrun', t.__dict__['run'])
	setattr(t, 'run', run)
	setattr(t, 'old_post_run', t.post_run)
	setattr(t, 'post_run', post_run)
	setattr(t, 'old_can_retrieve_cache', t.can_retrieve_cache)
	setattr(t, 'can_retrieve_cache', can_retrieve_cache)