Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
40 changes: 40 additions & 0 deletions .github/workflows/benchmark.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
name: Benchmark

# NOTE(review): pull_request_target grants repository secrets to fork PRs.
# This is only safe while no step checks out or executes PR-head code
# (actions/checkout below defaults to the base ref) -- confirm before adding
# any step that builds the PR's sources.
on: [push, pull_request_target, workflow_dispatch]

jobs:
  benchmark:
    name: Performance check
    # Skip merge commits -- their message always begins with 'Merge pull request '.
    if: contains(toJSON(github.event.head_commit.message), 'Merge pull request ') == false
    timeout-minutes: 30
    runs-on: ubuntu-24.04-arm
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      # Run the benchmark only when compiler sources or the builtin C library
      # changed (or when triggered manually via workflow_dispatch below).
      - name: Test changed files
        id: changed-files
        uses: tj-actions/changed-files@v47
        with:
          files: |
            src/*.c
            src/*.h
            lib/c.h
            lib/c.c

      - name: Download dependencies
        if: steps.changed-files.outputs.any_changed == 'true' || github.event_name == 'workflow_dispatch'
        run: |
          sudo dpkg --add-architecture armhf
          sudo apt-get update -q -y
          sudo apt-get install -q -y graphviz jq
          sudo apt-get install -q -y build-essential libc6:armhf
          sudo wget https://github.com/fastfetch-cli/fastfetch/releases/download/2.58.0/fastfetch-linux-aarch64.deb
          sudo dpkg -i fastfetch-linux-aarch64.deb
          sudo apt-get install -q -y python3

      - name: Measure execution time and memory use for bootstrapping
        if: steps.changed-files.outputs.any_changed == 'true' || github.event_name == 'workflow_dispatch'
        run: |
          make bench CC=gcc
          make bench CC=gcc DYNLINK=1
16 changes: 16 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,15 @@ TESTBINS := $(TESTS:%.c=$(OUT)/%.elf)
SNAPSHOTS = $(foreach SNAPSHOT_ARCH,$(ARCHS), $(patsubst tests/%.c, tests/snapshots/%-$(SNAPSHOT_ARCH)-static.json, $(TESTS)))
SNAPSHOTS += $(patsubst tests/%.c, tests/snapshots/%-arm-dynamic.json, $(TESTS))

# Benchmark variables
# Number of build/measure iterations; overridable from the command line.
BENCH_RUNS ?= 5
# Default result file for a static-link benchmark of the current CC/ARCH.
BENCH_OUTPUT_JSON ?= out/benchmark-$(CC)-$(ARCH)-static.json
# BENCH_ARGS must stay recursively expanded (`=`): it is defined before the
# DYNLINK check below, and the late binding lets it pick up the dynamic-link
# output name assigned there.
BENCH_ARGS = --hostcc $(CC) --arch $(ARCH) --runs $(BENCH_RUNS) --output-json $(BENCH_OUTPUT_JSON)
ifeq ($(DYNLINK),1)
# NOTE(review): plain `=` here clobbers a BENCH_OUTPUT_JSON supplied via the
# environment or an including makefile (command-line assignments still win) --
# confirm that is intended when DYNLINK=1.
BENCH_OUTPUT_JSON = out/benchmark-$(CC)-$(ARCH)-dynamic.json
BENCH_ARGS += --dynlink
endif

all: config bootstrap

sanitizer: CFLAGS += -fsanitize=address -fsanitize=undefined -fno-omit-frame-pointer -O0
Expand Down Expand Up @@ -133,6 +142,13 @@ check-abi-stage2: $(OUT)/$(STAGE2)
echo "Skip ABI compliance validation"; \
fi

# Benchmark every architecture in ARCHS with static linking, then run one
# extra dynamic-link benchmark. NOTE(review): the dynamic run uses the
# currently configured $(ARCH), while the analogous update-snapshots target
# pins ARCH=arm for its dynamic pass -- confirm the difference is intentional.
# NOTE(review): `all-bench` and `bench` are command targets, not files --
# confirm they appear in a .PHONY declaration elsewhere in this Makefile.
all-bench:
	$(Q)$(foreach ARCH, $(ARCHS), $(MAKE) bench CC=$(CC) ARCH=$(ARCH) DYNLINK=0 BENCH_RUNS=$(BENCH_RUNS) --silent;)
	$(Q)$(MAKE) bench CC=$(CC) ARCH=$(ARCH) DYNLINK=1 BENCH_RUNS=$(BENCH_RUNS) --silent

# Run the benchmark driver with the flags assembled in BENCH_ARGS above.
bench: tests/bench.py
	$(Q)$< $(BENCH_ARGS)

update-snapshots: tests/update-snapshots.sh
$(Q)$(foreach SNAPSHOT_ARCH, $(ARCHS), $(MAKE) distclean config update-snapshot ARCH=$(SNAPSHOT_ARCH) DYNLINK=0 --silent;)
$(Q)$(MAKE) distclean config update-snapshot ARCH=arm DYNLINK=1 --silent
Expand Down
138 changes: 138 additions & 0 deletions tests/bench.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,138 @@
#!/usr/bin/env python3

import subprocess
import time
import resource
import statistics
import json
import argparse

# Default number of benchmark iterations; also used as the argparse default
# below so the constant and the CLI help cannot drift apart.
DEFAULT_RUNS = 5
LINE_SEPARATOR = "=" * 70


def _run_silent(cmd):
    """Run `cmd`, discarding its stdout/stderr.

    Raises subprocess.CalledProcessError if the command exits non-zero.
    """
    subprocess.run(cmd,
                   stdout=subprocess.DEVNULL,
                   stderr=subprocess.DEVNULL,
                   check=True
                   )


def main():
    """Benchmark shecc bootstrapping.

    Repeatedly performs a clean build with the requested host compiler,
    target architecture, and link mode; records the wall-clock build time
    of each run and the peak resident set size (RSS) of the build's child
    processes; then writes both metrics to a JSON file and prints a summary.
    """
    parser = argparse.ArgumentParser(
        description="Run benchmarks for shecc"
    )
    parser.add_argument(
        "--hostcc",
        default="gcc",
        choices=["cc", "gcc", "clang"],
        help="Host C Compiler (default: %(default)s)"
    )
    parser.add_argument(
        "--arch",
        default="arm",
        choices=["arm", "riscv"],
        help="Target architecture (default: %(default)s)"
    )
    parser.add_argument(
        "--dynlink",
        action="store_true",
        help="Enable dynamic linking (default: static linking)"
    )
    parser.add_argument(
        "--output-json",
        default="out/benchmark.json",
        help="Output JSON file name (default: %(default)s)"
    )
    parser.add_argument(
        "--runs",
        type=int,
        default=DEFAULT_RUNS,
        help="Number of runs (default: %(default)s)"
    )
    args = parser.parse_args()

    # Reject nonsensical run counts before doing any build work.
    if args.runs < 1:
        parser.error("--runs must be at least 1")

    # "cc" and "gcc" are equivalent; normalize so output file names and
    # reported configs are consistent.
    if args.hostcc == "cc":
        args.hostcc = "gcc"

    link_mode = "dynamic" if args.dynlink else "static"
    available_config = "(HOSTCC, ARCH, DYNLINK)"
    config = f"({args.hostcc}, {args.arch}, {link_mode})"

    # Announce the measurement parameters up front.
    print(f"==> config: {available_config}={config}")
    print(f"==> runs: {args.runs}")
    print(f"==> output_json: {args.output_json}")

    build_cmd = ["make", f"CC={args.hostcc}", f"ARCH={args.arch}",
                 f"DYNLINK={int(args.dynlink)}", "--silent"]
    clean_cmd = ["make", "distclean", "--silent"]
    exec_time = []
    max_rss = 0

    for i in range(args.runs):
        # Start each run from a pristine tree so every iteration measures a
        # complete bootstrap, not an incremental rebuild.
        _run_silent(clean_cmd)

        print(f"Running ({i + 1}/{args.runs})...")

        start = time.monotonic()
        _run_silent(build_cmd)
        end = time.monotonic()

        # ru_maxrss for RUSAGE_CHILDREN is the peak RSS over all terminated
        # children of this process so far (KiB on Linux), so a running max
        # across iterations captures the largest build process seen.
        usage = resource.getrusage(resource.RUSAGE_CHILDREN)

        exec_time.append(end - start)
        max_rss = max(max_rss, usage.ru_maxrss)

    ave_time = statistics.mean(exec_time)

    # Clean the build after generating benchmark results
    _run_silent(clean_cmd)

    # Store the results to a json file
    result = [
        {
            "name": "Average Execution Time",
            "unit": "second",
            "value": ave_time,
            "runs": args.runs,
            "config": f"{available_config}={config}"
        },
        {
            "name": "Maximum Resident Set Size",
            "unit": "KBytes",
            "value": max_rss,
            "runs": args.runs,
            "config": f"{available_config}={config}"
        }
    ]

    with open(args.output_json, "w") as f:
        # Append a newline character since dump() doesn't append one
        # at the end of file
        json.dump(result, f, indent=4)
        f.write("\n")

    # Output the results
    print("\n" + LINE_SEPARATOR)
    print("Benchmark results")
    print(f"Config : {available_config}={config}")
    print(f"Output file: {args.output_json}")
    print(LINE_SEPARATOR)
    for res in result:
        print(f" {res['name']:30s}: {res['value']} {res['unit']} ({res['runs']} runs)")
    print(LINE_SEPARATOR)


if __name__ == "__main__":
    main()