#!/usr/bin/env python
# encoding: utf-8
# Carlos Rafael Giani, 2006
# Thomas Nagy, 2010-2018 (ita)

"""
Unit testing system for C/C++/D and interpreted languages providing test execution:

* in parallel, by using ``waf -j``
* partial (only the tests that have changed) or full (by using ``waf --alltests``)

The tests are declared by adding the **test** feature to programs::

	def options(opt):
		opt.load('compiler_cxx waf_unit_test')
	def configure(conf):
		conf.load('compiler_cxx waf_unit_test')
	def build(bld):
		bld(features='cxx cxxprogram test', source='main.cpp', target='app')
		# or
		bld.program(features='test', source='main2.cpp', target='app2')

When the build is executed, the programs declared with the **test** feature will be built
and executed without arguments. Success or failure is detected by looking at the return
code; the status and the standard output/error are stored on the build context.

The results can be displayed by registering a callback function. Here is how to call
the predefined callback::

	def build(bld):
	bld(features='cxx cxxprogram test', source='main.cpp', target='app')
		from waflib.Tools import waf_unit_test
		bld.add_post_fun(waf_unit_test.summary)

By passing ``--dump-test-scripts``, the build writes a companion Python script
(with the suffix ``_run.py``) next to each test binary; these scripts are useful for debugging.
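
Interpreted tests can be declared with the **test_scripts** feature instead. A minimal
sketch (the template string is illustrative; ``${PYTHON}`` assumes the python tool
was loaded during configuration)::

	def build(bld):
		bld(features='test_scripts',
			test_scripts_source='run_tests.py',
			test_scripts_template='${PYTHON} ${SCRIPT}')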
"""

import os, shlex, sys
from waflib.TaskGen import feature, after_method, taskgen_method
from waflib import Utils, Task, Logs, Options
from waflib.Tools import ccroot
testlock = Utils.threading.Lock()

SCRIPT_TEMPLATE = """#! %(python)s
import subprocess, sys
cmd = %(cmd)r
# if you want to debug with gdb:
#cmd = ['gdb', '-args'] + cmd
env = %(env)r
status = subprocess.call(cmd, env=env, cwd=%(cwd)r, shell=isinstance(cmd, str))
sys.exit(status)
"""

@taskgen_method
def handle_ut_cwd(self, key):
	"""
	Task generator method, used internally to limit code duplication.
	This method may disappear at any time.
	"""
	cwd = getattr(self, key, None)
	if cwd:
		if isinstance(cwd, str):
			# we want a Node instance
			if os.path.isabs(cwd):
				self.ut_cwd = self.bld.root.make_node(cwd)
			else:
				self.ut_cwd = self.path.make_node(cwd)
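
# Example (illustrative): a relative ``ut_cwd`` string is resolved against the directory
# of the wscript declaring the task generator, an absolute one against the filesystem root:
#
#	bld.program(features='test', source='main.cpp', target='app', ut_cwd='testdata')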

@feature('test_scripts')
def make_interpreted_test(self):
	"""Create interpreted unit tests."""
	for x in ['test_scripts_source', 'test_scripts_template']:
		if not hasattr(self, x):
			Logs.warn('a test_scripts task generator is missing %s' % x)
			return

	self.ut_run, lst = Task.compile_fun(self.test_scripts_template, shell=getattr(self, 'test_scripts_shell', False))

	script_nodes = self.to_nodes(self.test_scripts_source)
	for script_node in script_nodes:
		tsk = self.create_task('utest', [script_node])
		tsk.vars = lst + tsk.vars
		tsk.env['SCRIPT'] = script_node.path_from(tsk.get_cwd())

	self.handle_ut_cwd('test_scripts_cwd')

	env = getattr(self, 'test_scripts_env', None)
	if env:
		self.ut_env = env
	else:
		self.ut_env = dict(os.environ)

	paths = getattr(self, 'test_scripts_paths', {})
	for (k,v) in paths.items():
		p = self.ut_env.get(k, '').split(os.pathsep)
		if isinstance(v, str):
			v = v.split(os.pathsep)
		self.ut_env[k] = os.pathsep.join(p + v)
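
# Example (illustrative): ``test_scripts_paths`` appends entries to PATH-like variables
# of the test environment; values may be strings or lists:
#
#	bld(features='test_scripts',
#		test_scripts_source='run_tests.py',
#		test_scripts_template='${PYTHON} ${SCRIPT}',
#		test_scripts_paths={'PYTHONPATH': bld.path.abspath()})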

@feature('test')
@after_method('apply_link', 'process_use')
def make_test(self):
	"""Create the unit test task. There can be only one unit test task by task generator."""
	if not getattr(self, 'link_task', None):
		return

	tsk = self.create_task('utest', self.link_task.outputs)
	if getattr(self, 'ut_str', None):
		self.ut_run, lst = Task.compile_fun(self.ut_str, shell=getattr(self, 'ut_shell', False))
		tsk.vars = lst + tsk.vars

	self.handle_ut_cwd('ut_cwd')

	if not hasattr(self, 'ut_paths'):
		paths = []
		for x in self.tmp_use_sorted:
			try:
				y = self.bld.get_tgen_by_name(x).link_task
			except AttributeError:
				pass
			else:
				if not isinstance(y, ccroot.stlink_task):
					paths.append(y.outputs[0].parent.abspath())
		self.ut_paths = os.pathsep.join(paths) + os.pathsep

	if not hasattr(self, 'ut_env'):
		self.ut_env = dct = dict(os.environ)
		def add_path(var):
			dct[var] = self.ut_paths + dct.get(var,'')
		if Utils.is_win32:
			add_path('PATH')
		elif Utils.unversioned_sys_platform() == 'darwin':
			add_path('DYLD_LIBRARY_PATH')
			add_path('LD_LIBRARY_PATH')
		else:
			add_path('LD_LIBRARY_PATH')

	if not hasattr(self, 'ut_cmd'):
		self.ut_cmd = getattr(Options.options, 'testcmd', False)
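
# Example (a sketch): ``ut_str`` customizes the test command line; it is compiled
# like a regular waf rule string, so ``${SRC[0].abspath()}`` expands to the test binary:
#
#	bld.program(features='test', source='main.cpp', target='app',
#		ut_str='${SRC[0].abspath()} --verbose', ut_shell=False)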

@taskgen_method
def add_test_results(self, tup):
	"""Override and return tup[1] to interrupt the build immediately if a test does not run"""
	Logs.debug("ut: %r", tup)
	try:
		self.utest_results.append(tup)
	except AttributeError:
		self.utest_results = [tup]
	try:
		self.bld.utest_results.append(tup)
	except AttributeError:
		self.bld.utest_results = [tup]
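
# Example (a sketch): redefining the method above in a wscript aborts the build as soon
# as a test fails, since returning the non-zero return code marks the utest task as failed:
#
#	from waflib import Logs
#	from waflib.TaskGen import taskgen_method
#
#	@taskgen_method
#	def add_test_results(self, tup):
#		Logs.debug("ut: %r", tup)
#		self.utest_results = getattr(self, 'utest_results', []) + [tup]
#		self.bld.utest_results = getattr(self.bld, 'utest_results', []) + [tup]
#		return tup[1]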

@Task.deep_inputs
class utest(Task.Task):
	"""
	Execute a unit test
	"""
	color = 'PINK'
	after = ['vnum', 'inst']
	vars = []

	def runnable_status(self):
		"""
		Always execute the task if ``waf --alltests`` was used, and
		skip all tests if ``waf --notests`` was used
		"""
		if getattr(Options.options, 'no_tests', False):
			return Task.SKIP_ME

		ret = super(utest, self).runnable_status()
		if ret == Task.SKIP_ME:
			if getattr(Options.options, 'all_tests', False):
				return Task.RUN_ME
		return ret

	def get_test_env(self):
		"""
		In general, tests may require any library built anywhere in the project.
		Override this method if fewer paths are needed
		"""
		return self.generator.ut_env
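
	# Example (illustrative): a wscript may bypass the default environment entirely by
	# setting ``ut_env`` on the task generator; such a dict replaces os.environ, so any
	# required search paths must be passed explicitly:
	#
	#	bld.program(features='test', source='main.cpp', target='app',
	#		ut_env={'LANG': 'C', 'PATH': os.environ['PATH']})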

	def post_run(self):
		super(utest, self).post_run()
		if getattr(Options.options, 'clear_failed_tests', False) and self.waf_unit_test_results[1]:
			self.generator.bld.task_sigs[self.uid()] = None

	def run(self):
		"""
		Execute the test. By default the task itself always succeeds, whatever the
		test outcome; the results are stored on ``self.generator.bld.utest_results``
		for postprocessing.

		Override ``add_test_results`` to interrupt the build
		"""
		if hasattr(self.generator, 'ut_run'):
			return self.generator.ut_run(self)

		self.ut_exec = getattr(self.generator, 'ut_exec', [self.inputs[0].abspath()])
		ut_cmd = getattr(self.generator, 'ut_cmd', False)
		if ut_cmd:
			self.ut_exec = shlex.split(ut_cmd % ' '.join(self.ut_exec))

		return self.exec_command(self.ut_exec)
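
	# Example (illustrative): ``ut_cmd`` wraps the test in another command; the ``%s``
	# placeholder receives the test executable and its arguments:
	#
	#	bld.program(features='test', source='main.cpp', target='app',
	#		ut_cmd='valgrind --error-exitcode=1 %s')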

	def exec_command(self, cmd, **kw):
		self.generator.bld.log_command(cmd, kw)
		if getattr(Options.options, 'dump_test_scripts', False):
			script_code = SCRIPT_TEMPLATE % {
				'python': sys.executable,
				'env': self.get_test_env(),
				'cwd': self.get_cwd().abspath(),
				'cmd': cmd
			}
			script_file = self.inputs[0].abspath() + '_run.py'
			Utils.writef(script_file, script_code)
			os.chmod(script_file, Utils.O755)
			if Logs.verbose > 1:
				Logs.info('Test debug file written as %r' % script_file)

		proc = Utils.subprocess.Popen(cmd, cwd=self.get_cwd().abspath(), env=self.get_test_env(),
			stderr=Utils.subprocess.PIPE, stdout=Utils.subprocess.PIPE, shell=isinstance(cmd,str))
		(stdout, stderr) = proc.communicate()
		self.waf_unit_test_results = tup = (self.inputs[0].abspath(), proc.returncode, stdout, stderr)
		testlock.acquire()
		try:
			return self.generator.add_test_results(tup)
		finally:
			testlock.release()

	def get_cwd(self):
		return getattr(self.generator, 'ut_cwd', self.inputs[0].parent)

def summary(bld):
	"""
	Display an execution summary::

		def build(bld):
			bld(features='cxx cxxprogram test', source='main.cpp', target='app')
			from waflib.Tools import waf_unit_test
			bld.add_post_fun(waf_unit_test.summary)
	"""
	lst = getattr(bld, 'utest_results', [])
	if lst:
		Logs.pprint('CYAN', 'execution summary')

		total = len(lst)
		tfail = len([x for x in lst if x[1]])

		Logs.pprint('GREEN', '  tests that pass %d/%d' % (total-tfail, total))
		for (f, code, out, err) in lst:
			if not code:
				Logs.pprint('GREEN', '    %s' % f)

		Logs.pprint('GREEN' if tfail == 0 else 'RED', '  tests that fail %d/%d' % (tfail, total))
		for (f, code, out, err) in lst:
			if code:
				Logs.pprint('RED', '    %s' % f)

def set_exit_code(bld):
	"""
	If any of the tests fail, the build is marked as failed and waf exits with an error.
	This is useful if you have an automated build system which needs
	to report on errors from the tests.
	You may use it like this::

		def build(bld):
			bld(features='cxx cxxprogram test', source='main.c', target='app')
			from waflib.Tools import waf_unit_test
			bld.add_post_fun(waf_unit_test.set_exit_code)
	"""
	lst = getattr(bld, 'utest_results', [])
	for (f, code, out, err) in lst:
		if code:
			msg = []
			if out:
				msg.append('stdout:%s%s' % (os.linesep, out.decode('utf-8')))
			if err:
				msg.append('stderr:%s%s' % (os.linesep, err.decode('utf-8')))
			bld.fatal(os.linesep.join(msg))


def options(opt):
	"""
	Provide the ``--alltests``, ``--notests``, ``--clear-failed``, ``--testcmd`` and
	``--dump-test-scripts`` command-line options.
	"""
	opt.add_option('--notests', action='store_true', default=False, help='Exec no unit tests', dest='no_tests')
	opt.add_option('--alltests', action='store_true', default=False, help='Exec all unit tests', dest='all_tests')
	opt.add_option('--clear-failed', action='store_true', default=False,
		help='Force failed unit tests to run again next time', dest='clear_failed_tests')
	opt.add_option('--testcmd', action='store', default=False, dest='testcmd',
		help='Run the unit tests using the given command, e.g. --testcmd="valgrind --error-exitcode=1 %s" to run under valgrind')
	opt.add_option('--dump-test-scripts', action='store_true', default=False,
		help='Create python scripts to help debug tests', dest='dump_test_scripts')
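
# Example (illustrative) command lines combining the options above:
#
#	waf build --alltests
#	waf build --testcmd='valgrind --error-exitcode=1 %s' --clear-failed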