
Commit

A bit of cleanup and add coremark/mmbm/perfindex to the CI benchmark.
jiceatscion committed Mar 27, 2024
1 parent 4cbc830 commit c6b8538
Showing 3 changed files with 83 additions and 23 deletions.
6 changes: 6 additions & 0 deletions acceptance/router_benchmark/BUILD.bazel
@@ -16,13 +16,19 @@ exports_files(
args = [
    "--executable",
    "brload:$(location //acceptance/router_benchmark/brload:brload)",
    "--executable",
    "coremark:$(location //tools/coremark:coremark)",
    "--executable",
    "mmbm:$(location //tools/mmbm:mmbm)",
    "--docker-image=$(location //docker:router.tarball)",
]

data = [
    ":conf",
    "//docker:router.tarball",
    "//acceptance/router_benchmark/brload:brload",
    "//tools/coremark:coremark",
    "//tools/mmbm:mmbm",
]

raw_test(
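The BUILD change above wires the new coremark and mmbm tools into the benchmark by passing them as repeated "--executable name:$(location ...)" arguments and listing them in data so the binaries are staged with the test; test.py later resolves them by name via get_executable(). As a rough, illustrative sketch of how such repeated arguments could be turned into a name-to-path map (the acceptance framework's own parsing is not shown in this commit; the parser and the paths below are assumptions):

# Illustrative sketch only: map repeated "--executable name:path" arguments to paths.
# The real lookup is done by the acceptance framework's get_executable(); the
# argparse wiring and the bazel-bin paths below are assumptions.
import argparse


def parse_executables(argv: list[str]) -> dict[str, str]:
    parser = argparse.ArgumentParser()
    parser.add_argument("--executable", action="append", default=[],
                        help="tool in the form name:path, may be repeated")
    parser.add_argument("--docker-image")
    args, _ = parser.parse_known_args(argv)
    # Each entry looks like "coremark:<path to the coremark binary>".
    return dict(entry.split(":", 1) for entry in args.executable)


if __name__ == "__main__":
    tools = parse_executables([
        "--executable", "coremark:bazel-bin/tools/coremark/coremark",
        "--executable", "mmbm:bazel-bin/tools/mmbm/mmbm",
        "--docker-image", "docker/router.tarball",
    ])
    print(tools)  # {'coremark': 'bazel-bin/...', 'mmbm': 'bazel-bin/...'}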
29 changes: 17 additions & 12 deletions acceptance/router_benchmark/benchmark.py
@@ -47,15 +47,16 @@
# TODO(jiceatscion): get it from brload
BM_PACKET_LEN = 154

# Convenience types to carry interface params.
IntfReq = namedtuple("IntfReq", "label, prefixLen, ip, peerIp, exclusive")


def sudo(*args: [str]) -> str:
    # -A, --askpass makes sure the command fails instead of waiting for an
    # interactive password prompt.
    return cmd.sudo("-A", *args)


# Convenience types to carry interface params.
IntfReq = namedtuple("IntfReq", "label, prefixLen, ip, peerIp, exclusive")

class RouterBM:
    """Evaluates the performance of an external router running the SCION reference implementation.
@@ -238,7 +239,6 @@ def run_test_case(self, case: str, mapArgs: list[str]) -> (int, int):
beg = "0"
end = "0"
for line in output.splitlines():
print(f"brload said: {line}")
if line.startswith("metricsBegin"):
_, beg, _, end = line.split()

@@ -312,7 +312,7 @@ def run_test_case(self, case: str, mapArgs: list[str]) -> (int, int):

    # Fetch and log the number of cores used by Go. This may inform performance
    # modeling later.
    def log_core_counts(self):
    def core_count(self) -> int:
        print("==> Collecting number of cores...")
        promQuery = urlencode({
            'query': 'go_sched_maxprocs_threads{job="BR"}'
@@ -327,10 +327,15 @@ def log_core_counts(self):

        pld = json.loads(resp.read().decode("utf-8"))
        results = pld["data"]["result"]
        for result in results:
            instance = result["metric"]["instance"]
            _, val = result["value"]
            print(f"Router Cores for {instance}: {int(val)}")
        if len(results) > 1:
            print(f"FAILED: Found more than one subject router in results: {results}")
            exit(1)

        result = results[0]
        instance = result["metric"]["instance"]
        _, val = result["value"]
        print(f"Router Cores for {instance}: {int(val)}")
        return int(val)

    def horsepower(self) -> tuple[int]:
        resp = urlopen(f"https://{self.scrapeAddr}/horsepower.txt",
@@ -346,7 +351,7 @@ def horsepower(self) -> tuple[int]:

    def perfIndex(self, rate: int, coremark: int, mmbm: int) -> float:
        # mmbm is in mebiBytes/s
        return 1.0 / (coremark * (1.0/rate - BM_PACKET_LEN / (mmbm * 1024 * 1024)))
        return 1.0 / (coremark_sum * (1.0/rate - BM_PACKET_LEN / (mmbm * 1024 * 1024)))

    def run(self):
        print("Benchmarking...")
@@ -356,7 +361,7 @@ def run(self):
        coremarkstr = str(coremark or "Unavailable")
        mmbmstr = str(mmbm or "Unavailable")
        print(f"Coremark: {coremarkstr}")
        print(f"Memory bandwidth: {mmbm} MiB/s")
        print(f"Memory bandwidth (MiB/s): {mmbmstr}")

        # Build the interface mapping arg (here, we do not override the brload side mac address)
        mapArgs = []
@@ -377,7 +382,7 @@ def run(self):
            rateMap[testCase] = processed
            droppageMap[testCase] = dropped

        self.log_core_counts()
        cores = self.core_count()

        # Output the performance...
        for tt in TEST_CASES:
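The new perfIndex() ties the measured forwarding rate to the machine's raw capabilities. As I read the formula, 1/rate is the observed time per packet, BM_PACKET_LEN / (mmbm * 1024 * 1024) is the time spent moving one 154-byte packet through memory at the measured bandwidth, and the remainder is scaled by the CoreMark score so routers on different hardware become comparable; the index is the inverse of that product. A worked example with made-up numbers (the rate, coremark, and mmbm values below are illustrative only, not measurements from this commit):

# Worked example of the perf index formula, with made-up inputs:
# rate in packets/s (from brload), coremark score, mmbm in MiB/s.
BM_PACKET_LEN = 154  # bytes, same constant as in the benchmark


def perf_index(rate: int, coremark: int, mmbm: int) -> float:
    # Per-packet time (1/rate) minus the per-packet memory-transfer time
    # (BM_PACKET_LEN over the bandwidth in bytes/s), scaled by the CoreMark
    # score, then inverted to give a rate-like, hardware-normalized index.
    return 1.0 / (coremark * (1.0 / rate - BM_PACKET_LEN / (mmbm * 1024 * 1024)))


print(perf_index(rate=700_000, coremark=30_000, mmbm=10_000))  # ≈ 23.6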
71 changes: 60 additions & 11 deletions acceptance/router_benchmark/test.py
@@ -44,18 +44,20 @@
    "br_transit": 720000,
}

# TODO(jiceatscion): get it from brload
BM_PACKET_LEN = 154

# Convenience types to carry interface params.
IntfReq = namedtuple("IntfReq", "label, prefixLen, ip, peerIp, exclusive")
Intf = namedtuple("Intf", "name, mac, peerMac")


def sudo(*args: [str]) -> str:
    # -A, --askpass makes sure the command fails instead of waiting for an
    # interactive password prompt.
    return cmd.sudo("-A", *args)


# Convenience types to carry interface params.
IntfReq = namedtuple("IntfReq", "label, prefixLen, ip, peerIp, exclusive")
Intf = namedtuple("Intf", "name, mac, peerMac")


# Make up an eth mac address that is as unique as the given IP.
# Assumes ips in a /16 or smaller block (i.e., the last two bytes are unique within the test).
def mac_for_ip(ip: str) -> str:
@@ -513,7 +515,7 @@ def run_test_case(self, case: str, mapArgs: list[str]) -> (int, int):

    # Fetch and log the number of cores used by Go. This may inform performance
    # modeling later.
    def log_core_counts(self):
    def core_count(self) -> int:
        logger.info("==> Collecting number of cores...")
        promQuery = urlencode({
            'query': 'go_sched_maxprocs_threads{job="BR"}'
@@ -527,12 +529,53 @@ def log_core_counts(self):

        pld = json.loads(resp.read().decode("utf-8"))
        results = pld["data"]["result"]
        for result in results:
            instance = result["metric"]["instance"]
            _, val = result["value"]
            logger.info(f"Router Cores for {instance}: {int(val)}")
        if len(results) > 1:
            raise RuntimeError(f"FAILED: Found more than one subject router in results: {results}")

        result = results[0]
        instance = result["metric"]["instance"]
        _, val = result["value"]
        logger.info(f"Router Cores for {instance}: {int(val)}")
        return int(val)

    def horsepower(self) -> tuple[int]:
        coremark = 0
        mmbm = 0
        try:
            coremark_exe = self.get_executable("coremark")
            output = coremark_exe()
            line = output.splitlines()[-1]
            if line.startswith("CoreMark "):
                elems = line.split(" ")
                if len(elems) >= 4:
                    coremark = float(elems[3])
        except Exception as e:
            print(e)

        try:
            mmbm_exe = self.get_executable("mmbm")
            output = mmbm_exe()
            line = output.splitlines()[-1]
            if line.startswith("\"mmbm\": "):
                elems = line.split(" ")
                if len(elems) >= 2:
                    mmbm = float(elems[1])
        except Exception as e:
            print(e)

        return round(coremark), round(mmbm)

    def perfIndex(self, rate: int, coremark: int, mmbm: int) -> float:
        # mmbm is in mebiBytes/s
        return 1.0 / (coremark * (1.0/rate - BM_PACKET_LEN / (mmbm * 1024 * 1024)))

    def _run(self):
        coremark, mmbm = self.horsepower()
        coremarkstr = str(coremark or "Unavailable")
        mmbmstr = str(mmbm or "Unavailable")
        logger.info(f"Coremark: {coremarkstr}")
        logger.info(f"Memory bandwidth (MiB/s): {mmbmstr}")

        # Build the interface mapping arg
        mapArgs = []
        for label, intf in self.intfMap.items():
@@ -550,7 +593,13 @@ def _run(self):
            rateMap[testCase] = processed
            droppageMap[testCase] = dropped

        self.log_core_counts()
        cores = self.core_count()

        if coremark != 0 and mmbm != 0:
            for tt in TEST_CASES:
                # TODO(jiceatscion): The perf index assumes that line speed isn't the bottleneck.
                # It almost never is, but ideally we would run iperf3 to verify.
                logger.info(f"Perf index for {tt}: {self.perfIndex(rateMap[tt], coremark, mmbm)}")

        # Log and check the performance...
        # If this is used as a CI test, make sure that the performance is within the expected
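The horsepower() parser above keys off the last line of each tool's output: a line starting with "CoreMark " whose fourth field is taken as the score, and a line starting with '"mmbm": ' whose second field is taken as the bandwidth in MiB/s. The sample lines below are assumptions inferred from that parsing code, not captured output; they only show the shape the parser expects:

# Hypothetical last output lines, inferred from the parsing in horsepower() above;
# the real coremark/mmbm output may differ.
coremark_last_line = "CoreMark 1.0 : 28456.3 / GCC 12.2.0 / STACK"
mmbm_last_line = '"mmbm": 11234.7'

coremark = float(coremark_last_line.split(" ")[3])  # field 3 -> 28456.3
mmbm = float(mmbm_last_line.split(" ")[1])          # field 1 -> 11234.7
print(round(coremark), round(mmbm))                 # 28456 11235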
