libpulsar
A modular compiler for the pulsar programming language
Loading...
Searching...
No Matches
run.py
Go to the documentation of this file.
1# Copyright (C) 2023 Ethan Uppal. All rights reserved.
2# run.py: Runs tests for libpulsar.
3
4from sys import exit, stdout
5import subprocess
6import os
7
# Running total of tests that passed (incremented by the test runners below).
tests_passed = 0
# ANSI escape: erase the current line and return the cursor to column 0,
# used to overwrite the transient "running test: ..." status line.
REDO_LINE = "\033[2K\r"
SRCDIR = './src'                  # header include path passed to gcc (-I)
LIBDIR = './lib'                  # where libpulsar lives, passed to gcc (-L)
REPORTS_DIR = './tests/reports/'  # per-test report output directory
OUT = './tests/bin/a.out'         # scratch path for each compiled test binary
def cmd(args):
    """Run *args* as a subprocess and return (stdout, stderr, exit code).

    Output is returned as raw bytes so each caller decides how to decode it.
    """
    # subprocess.run with captured pipes is the idiomatic replacement for
    # the manual Popen/communicate dance and cannot leak the pipe handles.
    proc = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return proc.stdout, proc.stderr, proc.returncode
23
# rebuilds the library and clears reports
def rebuild():
    """Clear old test reports and rebuild libpulsar.

    Returns True on success. On a failed make step the captured stderr is
    printed and False is returned, aborting the test run.
    """
    print("+ \033[32;1mclearing reports\033[m")
    for report in os.listdir(REPORTS_DIR):
        if report.endswith(".txt"):
            # os.path.join is safer than string concatenation for paths
            os.remove(os.path.join(REPORTS_DIR, report))
    stdout.write("* \033[33;1mrebuilding library\033[m")
    stdout.flush()
    # both make steps share identical error handling, so run them in a loop
    for target in ('clean_code', 'static'):
        _, err, code = cmd(['make', target])
        if code != 0:
            print(err.decode('utf-8'))
            return False
    print(REDO_LINE + "+ \033[32;1mrebuilt library\033[m")
    return True
42
# indicates that a test is starting
def print_start_test(name):
    """Announce that test *name* is starting, without a trailing newline."""
    # end='' keeps the cursor on this line so REDO_LINE can overwrite it later
    print(f"* \033[33;1mrunning test: {name}\033[m", end="", file=stdout, flush=True)
47
# compiles the given filename as an executable linked with libpulsar
def create_exec(filename):
    """Compile *filename* with gcc, linked against libpulsar, into OUT.

    On compiler failure the diagnostics are printed and the whole script
    exits with the compiler's exit code. Returns (stderr, exit code).
    """
    _, err, code = cmd([
        '/usr/bin/gcc', filename,
        '-L', LIBDIR,
        '-l', 'pulsar',
        '-I', SRCDIR,
        '-D', 'MAIN',
        '-o', OUT
    ])
    if code != 0:
        print(f"\n \033[31;1mcompiling failed with exit code {code}:\033[m")
        # utf-8 matches the decoding used in rebuild() and avoids a
        # UnicodeDecodeError on non-ASCII compiler diagnostics
        print(err.decode('utf-8'))
        exit(code)
    return err, code
63
def run_exec(args=None):
    """Run the compiled test binary OUT with optional extra *args*.

    Returns (stdout, stderr, exit code) from cmd(). The default is None
    rather than [] to avoid the shared-mutable-default pitfall.
    """
    return cmd([OUT] + (args if args is not None else []))
68
# runs an output-comparison test
def run_output_test(case):
    """Run a single output-comparison test.

    *case* is [source_file, case_name, optional_note, *extra_args]: the C
    file is compiled against libpulsar, run on tests/data/<case>.in, and its
    stdout is compared against tests/data/<case>.out. Increments the module
    counter tests_passed on success.
    """
    global tests_passed
    TEST_PRGS = './working_programs/test/'
    TEST_DATA = './tests/data/'
    # NOTE: the original also shadowed SRCDIR/LIBDIR/OUT here with unused
    # locals; removed since the module-level constants are what's used.

    # print test name
    filename = case[0]
    casename = case[1]
    note = f" (note: {case[2]})" if len(case) >= 3 else ''
    print_start_test(casename)

    # run test
    INPUT_FILE = TEST_DATA + casename + '.in'
    OUTPUT_FILE = TEST_DATA + casename + '.out'
    create_exec(TEST_PRGS + filename)
    out, err, _ = run_exec([INPUT_FILE] + case[3:])

    # compare output and print test status
    with open(OUTPUT_FILE, 'r') as f:
        stdout.write(REDO_LINE)
        act = out.decode('ascii')
        # expected files refer to a placeholder name; point it at this input
        exp = f.read().replace('dev.plsr', INPUT_FILE)
        if act != exp:
            print(f"- \033[31;1mtest '{casename}' failed{note}\033[m")
            print(act)
            print(err.decode('ascii'))
        else:
            print(f"+ \033[32;1mtest '{casename}' passed{note}\033[m")
            tests_passed += 1
104
# runs a unit test
def run_unit_test(filename):
    """Compile and run one unit test from tests/unit/.

    A test passes when the compiled binary exits with status 0. Increments
    the module counter tests_passed on success.
    """
    global tests_passed
    TEST_PRGS = './tests/unit/'

    # print test name
    name = filename.split('.')[0]
    print_start_test(name)

    # run test; create_exec exits the script itself on compile failure, so
    # its return values carry no information worth keeping here
    create_exec(TEST_PRGS + filename)
    out, err, code = run_exec()

    # print test status
    stdout.write(REDO_LINE)
    if code != 0:
        print(f"- \033[31;1mtest '{name}' failed\033[m")
        print(out.decode('ascii'))
        print(err.decode('ascii'))
    else:
        print(f"+ \033[32;1mtest '{name}' passed\033[m")
        tests_passed += 1
127
# prints a summary of the tests
def print_summary(count):
    """Print a pass/fail summary for *count* tests.

    Exits with status 1 unless every test passed, so CI sees the failure.
    """
    print("\nSummary:")
    if tests_passed == 0:
        print(f"- \033[31;1mNone of the {count} tests passed\033[m")
        exit(1)
    elif tests_passed < count / 2:
        print(f"- \033[31;1mLess than half of the tests ({tests_passed}/{count}) passed\033[m")
        exit(1)
    elif tests_passed < count:
        # stray C-style semicolons removed from the two lines below
        print(f"~ \033[33;1mMost of the tests ({tests_passed}/{count}) passed\033[m")
        exit(1)
    else:
        print(f"+ \033[32;1mAll {count} tests passed!\033[m")
142
def main():
    """Rebuild the library, run every output and unit test, print a summary."""
    # setup for new round of tests
    if not rebuild():
        return

    # tests that compare output of file
    output_tests = [
        ['lex.c', 'lexer0', 'empty'],
        ['lex.c', 'lexer1', 'string interp'],
        ['lex.c', 'lexer2', 'string interp'],
        ['lex.c', 'lexer3', 'string interp'],
        ['lex.c', 'everytoken'],
        ['hashmap.c', 'hash1'],
        # ['hashmap2.c', 'hash2'],
        ['parse.c', 'parse1', 'basic expr lits'],
        ['parse.c', 'parse2', 'let statements (no types)'],
        ['parse.c', 'parse3', 'string interp'],
        ['parse.c', 'parse4', 'precedence/associativity'],
        ['parse.c', 'parse5', 'functions/blocks'],
        ['parse.c', 'parse6', 'while/if/return'],
        ['parse.c', 'parse7', 'extern'],
        ['parse.c', 'parse8', 'calls/qualifiers'],
    ]
    # unit tests (think assertions)
    unit_tests = os.listdir('./tests/unit')
    unit_tests.remove('TEMPLATE.c')
    for case in output_tests:
        run_output_test(case)
    for case in unit_tests:
        run_unit_test(case)
    # remove the scratch binary directly instead of shelling out to rm
    if os.path.exists(OUT):
        os.remove(OUT)
    print_summary(len(output_tests) + len(unit_tests))
175
# Entry point: run the full test suite when executed as a script.
if __name__ == "__main__":
    main()
int main(int argc, const char *argv[])
Definition main.c:7