dummy_format.py
import os
try:
    import ConfigParser
except ImportError:
    import configparser as ConfigParser

import lit.formats
import lit.Test


class DummyFormat(lit.formats.FileBasedTest):
    def execute(self, test, lit_config):
        # In this dummy format, expect that each test file is actually just a
        # .ini format dump of the results to report.
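        #
        # An input file might look roughly like this (illustrative values only;
        # the section and key names mirror what this method reads below):
        #
        #   [global]
        #   result_code = PASS
        #   result_output = Test passed.
        #
        #   [results]
        #   value0 = 1
        #   value1 = 2.3456
        #
        #   [micro-tests]
        #   test0 = micro0
        #
        #   [micro-results]
        #   micro_value0 = 4
        #   micro_value1 = 1.2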
        source_path = test.getSourcePath()

        cfg = ConfigParser.ConfigParser()
        cfg.read(source_path)

        # Create the basic test result.
        result_code = cfg.get('global', 'result_code')
        result_output = cfg.get('global', 'result_output')
        result = lit.Test.Result(getattr(lit.Test, result_code),
                                 result_output)

        # Load additional metrics.
        for key, value_str in cfg.items('results'):
            value = eval(value_str)
            if isinstance(value, int):
                metric = lit.Test.IntMetricValue(value)
            elif isinstance(value, float):
                metric = lit.Test.RealMetricValue(value)
            else:
                raise RuntimeError("unsupported result type")
            result.addMetric(key, metric)

        # Create micro test results.
        for key, micro_name in cfg.items('micro-tests'):
            micro_result = lit.Test.Result(getattr(lit.Test, result_code, ''))
            # Load micro test additional metrics.
            for key, value_str in cfg.items('micro-results'):
                value = eval(value_str)
                if isinstance(value, int):
                    metric = lit.Test.IntMetricValue(value)
                elif isinstance(value, float):
                    metric = lit.Test.RealMetricValue(value)
                else:
                    raise RuntimeError("unsupported result type")
                micro_result.addMetric(key, metric)
            result.addMicroResult(micro_name, micro_result)

        return result
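

# A test suite's lit.cfg might register this format along these lines
# (a sketch, not the exact configuration shipped with lit; the suite name
# and directory layout here are assumptions):
#
#   import os
#   import site
#   site.addsitedir(os.path.dirname(__file__))
#   import dummy_format
#
#   config.name = 'test-data'
#   config.suffixes = ['.ini']
#   config.test_format = dummy_format.DummyFormat()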