run_benchmarks.py
import os
import re
import sys
import json
import subprocess
import statistics
from pathlib import Path

# The data folder defaults to the repository's Data directory; an optional CLI argument overrides it.
dataPath = '../../../Data'
if len(sys.argv) > 1:
    dataPath = sys.argv[1]
print(f'Using data path {dataPath}')
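# Example invocation (an assumption about typical usage; the script expects to run
# from the repository root with Lean already built in Release):
#   python run_benchmarks.py ../../../Data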
results = {}
for baseDirectory in ["Algorithm.CSharp/Benchmarks", "Algorithm.Python/Benchmarks"]:
    # "Algorithm.CSharp/Benchmarks" -> "CSharp", "Algorithm.Python/Benchmarks" -> "Python"
    language = baseDirectory[len("Algorithm") + 1:baseDirectory.index("/")]
    resultsPerLanguage = {}
    for algorithmFile in sorted(os.listdir(baseDirectory)):
        if algorithmFile.endswith(("py", "cs")):
            algorithmName = Path(algorithmFile).stem
            if "Fine" in algorithmName:
                # we skip fundamental benchmarks for now
                continue
            # C# algorithms run from the compiled algorithm assembly; Python algorithms run from source
            algorithmLocation = "QuantConnect.Algorithm.CSharp.dll" if language == "CSharp" else os.path.join("../../../", baseDirectory, algorithmFile)
            print(f'Start running algorithm {algorithmName} language {language}...')
            dataPointsPerSecond = []
            benchmarkLengths = []
            # Collect one sample per algorithm; widen the range to average over more runs
            for x in range(1, 2):
                subprocess.run(["dotnet", "./QuantConnect.Lean.Launcher.dll",
                        "--data-folder " + dataPath,
                        "--algorithm-language " + language,
                        "--algorithm-type-name " + algorithmName,
                        "--algorithm-location " + algorithmLocation,
                        "--log-handler ConsoleErrorLogHandler",
                        "--close-automatically true"],
                    cwd="./Launcher/bin/Release",
                    stdout=subprocess.DEVNULL,
                    stderr=subprocess.DEVNULL)
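                # Each run writes a per-algorithm log next to the launcher binary;
                # parse it for the throughput and duration lines the benchmark emits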
                algorithmLogs = os.path.join("./Launcher/bin/Release", algorithmName + "-log.txt")
                with open(algorithmLogs, 'r') as file:
                    for line in file.readlines():
                        for match in re.findall(r"(\d+)k data points per second", line):
                            dataPointsPerSecond.append(int(match))
                        for match in re.findall(r" completed in (\d+)", line):
                            benchmarkLengths.append(int(match))

            averageDps = statistics.mean(dataPointsPerSecond)
            averageLength = statistics.mean(benchmarkLengths)
            resultsPerLanguage[algorithmName] = {"average-dps": averageDps, "samples": dataPointsPerSecond, "average-length": averageLength}
            print(f'Performance for {algorithmName} language {language} avg dps: {averageDps}k samples: [{",".join(str(x) for x in dataPointsPerSecond)}] avg length {averageLength} sec')
    results[language] = resultsPerLanguage

with open("benchmark_results.json", "w") as outfile:
    json.dump(results, outfile)
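
A minimal sketch of how the emitted benchmark_results.json could be consumed, assuming the {language: {algorithm: {"average-dps", "samples", "average-length"}}} layout written above; this summary script is illustrative and not part of run_benchmarks.py:

    import json

    with open("benchmark_results.json") as f:
        results = json.load(f)

    # One line per benchmark: language, algorithm, mean throughput and mean duration
    for language, algorithms in results.items():
        for name, stats in algorithms.items():
            print(f"{language}/{name}: {stats['average-dps']}k dps, {stats['average-length']} sec")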