169 lines
5.1 KiB
Python
169 lines
5.1 KiB
Python
#!/usr/bin/env python3
|
|
|
|
import os
import shutil
import subprocess

from colorama import Fore
from colorama import Style
from colorama import init as colorama_init
|
|
|
|
# Per-suite result counters, keyed by the suite's directory path.
# Normal suites hold a 4-tuple (vslToLLVM, llvmToBin, executionCorrect, total);
# error suites hold a 2-tuple (errorsDetected, total).
stats = {}
|
|
|
|
# Determine which clang executable is usable.
# Note: at the ISTIC it is clang-19.
def clangName():
    """Return the newest versioned clang found on PATH (clang-20 down to
    clang-17), falling back to plain "clang" when none is installed.
    """
    for name in (f"clang-{v}" for v in range(20, 16, -1)):
        # BUG FIX: the previous `command -v {name} 2>&1 >/dev/null` redirected
        # in the wrong order (stderr was duplicated to the terminal *before*
        # stdout was silenced) and spawned a shell per candidate.
        # shutil.which performs the same PATH lookup natively.
        if shutil.which(name) is not None:
            return name
    return "clang"
|
|
|
|
# Resolved once at import time; every compilation below reuses this name.
clang = clangName()
|
|
|
|
def runNormalTestSuit(testSuit):
    """Walk the *testSuit* directory and run each file it contains as a
    normal (compile + execute + compare) test, accumulating into stats."""
    print(f"Running test suit : {testSuit}")
    stats[testSuit] = (0, 0, 0, 0)
    for currentDir, _subDirs, files in os.walk(testSuit):
        for entry in files:
            runNormalTest(testSuit, currentDir, entry)
|
|
|
|
def runNormalTest(testSuit, dirname, filename):
    """Run one normal test: compile the .vsl file to LLVM IR with TP2.jar,
    build a native binary with clang, execute it (optionally feeding a
    .test_in file on stdin), and compare exit code / stdout against the
    .test_ret / .test_out oracle files when present.

    Updates stats[testSuit] in place; files without a .vsl extension are
    silently ignored.
    """
    vslToLLVM = False
    llvmToBin = False
    executionCorrect = False

    path = os.path.join(dirname, filename)
    basename, ext = os.path.splitext(path)
    # Ignore not .vsl files
    if ext != ".vsl":
        return

    # BUG FIX: the f-string had no placeholder and printed "(unknown)".
    print(f'\tRunning test {filename}')

    # VSL -> LLVM
    p = subprocess.run(f"java -jar build/libs/TP2.jar {path}", shell=True)
    print(f"java -jar build/libs/TP2.jar {path}")
    if p.returncode == 0:
        vslToLLVM = True

    # LLVM -> Bin
    if vslToLLVM:
        # BUG FIX: "2>NULL" is Windows syntax and created a stray file named
        # NULL under a POSIX shell; use /dev/null.
        p = subprocess.run(f"{clang} {basename}.ll -o {basename} 2>/dev/null", shell=True)
        print(f"{clang} {basename}.ll -o {basename} 2>/dev/null")
        if p.returncode == 0:
            llvmToBin = True
            executionCorrect = True

    # Exe
    if llvmToBin:
        executionCorrect = True
        p = None  # stays None when the run times out
        try:
            # Renamed from `input` to avoid shadowing the builtin.
            stdinRedirect = ""
            if os.path.isfile(f"{basename}.test_in"):
                stdinRedirect = f"< {basename}.test_in"
            # BUG FIX: ".\" is a Windows path prefix; the command runs through
            # a POSIX shell, so the relative-path prefix must be "./".
            p = subprocess.run(f"./{basename} " + stdinRedirect, shell=True, stdout=subprocess.PIPE, timeout=5)
            print(f"./{basename} ")
        except subprocess.TimeoutExpired:
            # A timeout only counts as correct for tests meant to diverge.
            executionCorrect = "diverge" in basename

        # BUG FIX: after a timeout `p` used to hold the *clang* result and the
        # oracle checks compared against the wrong process; skip them instead.
        if p is not None:
            # Check return code
            if os.path.isfile(f"{basename}.test_ret"):
                with open(f"{basename}.test_ret", "r") as expected:
                    executionCorrect = executionCorrect and int(expected.read()) == p.returncode
            # Check stdout
            if os.path.isfile(f"{basename}.test_out"):
                with open(f"{basename}.test_out", "rb") as expected:
                    executionCorrect = executionCorrect and expected.read() == p.stdout

    print(f"{colorFromBool(vslToLLVM)}\t\tVSL to LLVM : {'OK' if vslToLLVM else 'Fail'}{Style.RESET_ALL}")
    print(f"{colorFromBool(llvmToBin)}\t\tLLVM to Bin : {'OK' if llvmToBin else 'Fail'}{Style.RESET_ALL}")
    print(f"{colorFromBool(executionCorrect)}\t\tCorrect Execution : {'OK' if executionCorrect else 'Fail'}{Style.RESET_ALL}")

    x, y, z, t = stats[testSuit]
    if vslToLLVM:
        x += 1
    if llvmToBin:
        y += 1
    if executionCorrect:
        z += 1
    t += 1
    stats[testSuit] = (x, y, z, t)
|
|
|
|
def colorFromBool(b):
    """Map a truth value to a terminal color: green for success, red for failure."""
    return Fore.GREEN if b else Fore.RED
|
|
|
|
|
|
def afficheStats(stat):
    """Print the summary of one normal suite's counters
    (vsl-to-llvm, llvm-to-bin, correct executions, total)."""
    okLlvm, okBin, okRun, total = stat
    print(f'\t Nombre de tests : {total}')
    print(f'{colorFromBool(okLlvm==total)}\t vsl to llvm : {okLlvm} / {total}{Style.RESET_ALL}')
    print(f'{colorFromBool(okBin==total)}\t llvm to bin : {okBin} / {total}{Style.RESET_ALL}')
    print(f'{colorFromBool(okRun==total)}\t resultat correct: {okRun} / {total}{Style.RESET_ALL}')
|
|
|
|
# NOTE(review): these module-level counters appear unused — runTests() binds
# *local* names nbError/nbErrorTests from stats[suit] and never declares them
# global. Kept for compatibility; confirm before removing.
nbError = 0
nbErrorTests = 0
|
|
|
|
def runErrorTest(testSuit, dirname, filename):
    """Run one error-level test: the compiler is EXPECTED to reject *path*.

    The test passes when TP2.jar exits non-zero AND wrote something to
    stderr. Updates stats[testSuit] = (errorsDetected, total) in place;
    files without a .vsl extension are silently ignored.
    """
    path = os.path.join(dirname, filename)
    basename, ext = os.path.splitext(path)
    if ext != ".vsl":
        return

    # BUG FIX: the f-string had no placeholder and printed "(unknown)".
    print(f'\tRunning test {filename}')

    hasError = False

    # Source fed on stdin; would-be LLVM output redirected into a .ll file.
    p = subprocess.run(f"java -jar build/libs/TP2.jar < {path} 1>{basename}.ll", shell=True, stderr=subprocess.PIPE)

    if p.returncode != 0 and p.stderr != b'':
        print(f"\t\t{Fore.GREEN}Error : Yes{Style.RESET_ALL}")
        hasError = True
    else:
        print(f"\t\t{Fore.RED}Error : No{Style.RESET_ALL}")
        hasError = False

    x, t = stats[testSuit]
    if hasError:
        x += 1
    t += 1
    stats[testSuit] = (x, t)
|
|
|
|
|
|
def runErrorLevelTests(testSuit):
    """Walk the *testSuit* directory and run each file it contains as an
    error-level test (compiler expected to fail), accumulating into stats."""
    print(f"Running test suit : {testSuit}")
    stats[testSuit] = (0, 0)
    for currentDir, _subDirs, files in os.walk(testSuit):
        for entry in files:
            runErrorTest(testSuit, currentDir, entry)
|
|
|
|
|
|
def _isErrorSuit(suit):
    """A suite is an error suite when its path mentions 'error' (any case)."""
    # Generalizes the duplicated `"error" in suit or "Error" in suit` check;
    # also matches spellings like "ERROR".
    return "error" in suit.lower()


def runTests(testDirName):
    """Discover every test suite (each sub-directory of *testDirName*),
    run them all, then print one summary per suite in sorted order."""
    folderContent = [os.path.join(testDirName, d) for d in os.listdir(testDirName)]
    testSuits = [d for d in folderContent if os.path.isdir(d)]
    testSuits.sort()

    for suit in testSuits:
        if _isErrorSuit(suit):
            runErrorLevelTests(suit)
        else:
            runNormalTestSuit(suit)

    for suit in testSuits:
        print(f'Résumé du test {suit}')
        if _isErrorSuit(suit):
            nbError, nbErrorTests = stats[suit]
            print(f'{colorFromBool(nbError==nbErrorTests)}\t Nombre d\'erreurs : {nbError} / {nbErrorTests}{Style.RESET_ALL}')
        else:
            afficheStats(stats[suit])
|
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__" :
    # Enable ANSI colors (required on Windows terminals) before any output.
    colorama_init()
    # Test suites are expected under ./tests, relative to the working directory.
    runTests("tests")