[mpact][compiler] enable optional opt-level for JIT compiler (#28)
The JIT compile step now accepts an optional opt_level,
as shown below:
invoker, fn = mpact_jit_compile(net, .. inputs .., opt_level=3)
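
A minimal usage sketch for the separate compile/run steps (the import path
and the net/inp/adj_mat names are assumptions taken from the test below;
when opt_level is omitted it defaults to 2):

    from mpact.mpactbackend import mpact_jit_compile, mpact_jit_run

    # Compile once with the default optimization level (opt_level=2).
    invoker, fn = mpact_jit_compile(net, inp, adj_mat)
    res = mpact_jit_run(invoker, fn, inp, adj_mat)

    # Compile again with an explicit, higher optimization level.
    invoker, fn = mpact_jit_compile(net, inp, adj_mat, opt_level=3)
    res = mpact_jit_run(invoker, fn, inp, adj_mat)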
diff --git a/python/mpact/mpactbackend.py b/python/mpact/mpactbackend.py
index bb8cc3b..ef54a74 100644
--- a/python/mpact/mpactbackend.py
+++ b/python/mpact/mpactbackend.py
@@ -22,10 +22,9 @@
from mpact.passmanager import *
from mpact.runtime import *
-# One time set up of support library and optimization level.
+# One time set up of support library.
SUPPORT_LIB = os.getenv("SUPPORT_LIB", default=None)
SHARED_LIBS = [] if SUPPORT_LIB is None else [SUPPORT_LIB]
-OPT_LEVEL = int(os.getenv("OPT_LEVEL", default=2))
# A type shared between the result of `LinalgOnTensorsBackend.compile` and the
# input to `LinalgOnTensorsBackend.load`. Each backend will likely have a
@@ -208,8 +207,8 @@
class MpactBackendInvoker:
- def __init__(self, module):
- self.ee = ExecutionEngine(module, opt_level=OPT_LEVEL, shared_libs=SHARED_LIBS)
+ def __init__(self, module, opt_level):
+ self.ee = ExecutionEngine(module, opt_level=opt_level, shared_libs=SHARED_LIBS)
self.result = None
return_funcs = get_return_funcs(module)
@@ -317,8 +316,9 @@
class MpactBackendLinalgOnTensorsBackend(LinalgOnTensorsBackend):
"""Main entry-point for the MPACT backend."""
- def __init__(self):
+ def __init__(self, opt_level):
super().__init__()
+ self.opt_level = opt_level
def compile(self, imported_module: Module):
"""Compiles an imported module, with a flat list of functions.
@@ -339,7 +339,7 @@
def load(self, module) -> MpactBackendInvoker:
"""Loads a compiled artifact into the runtime."""
- return MpactBackendInvoker(module)
+ return MpactBackendInvoker(module, self.opt_level)
def sparse_metadata(a: torch.Tensor) -> SparsityMeta:
@@ -467,7 +467,7 @@
return fx_importer.module
-def mpact_jit_compile(f, *args, **kwargs):
+def mpact_jit_compile(f, *args, opt_level=2, **kwargs):
"""This method compiles the given callable using the MPACT backend."""
# Import module and lower into Linalg IR.
module = export_and_import(f, *args, **kwargs)
@@ -482,7 +482,7 @@
enable_ir_printing=False,
)
# Compile with MPACT backend.
- backend = MpactBackendLinalgOnTensorsBackend()
+ backend = MpactBackendLinalgOnTensorsBackend(opt_level=opt_level)
compiled = backend.compile(module)
invoker = backend.load(compiled)
return invoker, f
@@ -527,6 +527,7 @@
return invoker.main(*xargs)
+# Convenience wrapper.
def mpact_jit(f, *args, **kwargs):
"""This method compiles and runs the given callable using the MPACT backend."""
invoker, fn = mpact_jit_compile(f, *args, **kwargs)
diff --git a/test/python/sparse_gcn.py b/test/python/sparse_gcn.py
index ce89411..a1109a2 100644
--- a/test/python/sparse_gcn.py
+++ b/test/python/sparse_gcn.py
@@ -29,6 +29,11 @@
# CHECK: [5.7501717 5.7501717 5.7501717 5.7501717]
# CHECK: [4.697952 4.697952 4.697952 4.697952 ]
# CHECK: [3.640687 3.640687 3.640687 3.640687 ]{{\]}}
+# CHECK: mpact compile opt=3
+# CHECK: mpact run
+# CHECK: {{\[}}[4.477828 4.477828 4.477828 4.477828 ]
+# CHECK: [5.7501717 5.7501717 5.7501717 5.7501717]
+# CHECK: [4.697952 4.697952 4.697952 4.697952 ]
#
with torch.no_grad():
# Run it with PyTorch.
@@ -47,3 +52,10 @@
print("mpact run")
res = mpact_jit_run(invoker, fn, inp, adj_mat)
print(res)
+
+ # Run it with MPACT (with separate compile and run steps, given opt_level).
+ print("mpact compile opt=3")
+ invoker, fn = mpact_jit_compile(net, inp, adj_mat, opt_level=3)
+ print("mpact run")
+ res = mpact_jit_run(invoker, fn, inp, adj_mat)
+ print(res)