-
Notifications
You must be signed in to change notification settings - Fork 526
/
Copy pathtargets.bzl
110 lines (101 loc) · 4.25 KB
/
targets.bzl
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "get_aten_mode_options", "runtime")
def _operator_registry_preprocessor_flags():
    """Returns preprocessor flags that bound the kernel registration table.

    Precedence:
      1. An explicit `executorch.max_kernel_num` buckconfig value, if set.
      2. In internal (non-OSS) builds, a build-constraint-driven select().
      3. Otherwise no flags (the default table size applies).
    """
    configured_max = native.read_config("executorch", "max_kernel_num", None)
    if configured_max != None:
        return ["-DMAX_KERNEL_NUM=" + configured_max]

    if runtime.is_oss:
        return []

    # Internal builds: let the active constraint (if any) pick the cap.
    return select({
        "DEFAULT": [],
        "fbsource//xplat/executorch/tools/buck/constraints:executorch-max-kernel-num-256": ["-DMAX_KERNEL_NUM=256"],
        "fbsource//xplat/executorch/tools/buck/constraints:executorch-max-kernel-num-128": ["-DMAX_KERNEL_NUM=128"],
        "fbsource//xplat/executorch/tools/buck/constraints:executorch-max-kernel-num-64": ["-DMAX_KERNEL_NUM=64"],
    })
def define_common_targets():
    """Defines targets that should be shared between fbcode and xplat.

    The directory containing this targets.bzl file should also contain both
    TARGETS and BUCK files that call this function.
    """

    # Visibility shared by the broadly-consumable targets below.
    wide_visibility = [
        "//executorch/...",
        "@EXECUTORCH_CLIENTS",
    ]

    # Registry variant hard-capped at a single kernel slot so tests can
    # exercise the table-overflow path.
    runtime.cxx_library(
        name = "operator_registry_MAX_NUM_KERNELS_TEST_ONLY",
        srcs = ["operator_registry.cpp"],
        exported_headers = ["operator_registry.h"],
        preprocessor_flags = ["-DMAX_KERNEL_NUM=1"],
        visibility = wide_visibility,
        exported_deps = [
            "//executorch/runtime/core:core",
            "//executorch/runtime/core:evalue",
        ],
    )

    # Don't depend on this target directly; depend on
    # //executorch/extension/threadpool:threadpool instead.
    runtime.cxx_library(
        name = "thread_parallel_interface",
        exported_headers = ["thread_parallel_interface.h"],
        exported_deps = [
            "//executorch/runtime/core:core",
            "//executorch/runtime/core/portable_type/c10/c10:c10",
            "//executorch/runtime/platform:platform",
        ],
        visibility = ["//executorch/extension/threadpool/..."],
    )

    # The remaining targets come in a portable flavor and, when enabled,
    # an ATen flavor distinguished by an "_aten" name suffix.
    for aten_mode in get_aten_mode_options():
        suffix = "_aten" if aten_mode else ""

        runtime.cxx_library(
            name = "operator_registry" + suffix,
            srcs = ["operator_registry.cpp"],
            exported_headers = ["operator_registry.h"],
            preprocessor_flags = _operator_registry_preprocessor_flags(),
            visibility = wide_visibility,
            exported_deps = [
                "//executorch/runtime/core:core",
                "//executorch/runtime/core:evalue" + suffix,
            ],
        )

        runtime.cxx_library(
            name = "kernel_runtime_context" + suffix,
            exported_headers = ["kernel_runtime_context.h"],
            visibility = [
                "//executorch/kernels/...",
                "//executorch/runtime/executor/...",
                "//executorch/runtime/kernel/...",
                "@EXECUTORCH_CLIENTS",
            ],
            exported_deps = [
                "//executorch/runtime/core:core",
                "//executorch/runtime/platform:platform",
                "//executorch/runtime/core:memory_allocator",
                "//executorch/runtime/core:event_tracer" + suffix,
                # TODO(T147221312): This will eventually depend on exec_aten
                # once KernelRuntimeContext support tensor resizing, which is
                # why this target supports aten mode.
            ],
        )

        runtime.cxx_library(
            name = "kernel_includes" + suffix,
            exported_headers = ["kernel_includes.h"],
            visibility = [
                "//executorch/runtime/kernel/...",
                "//executorch/kernels/...",
                "//executorch/kernels/prim_ops/...",  # Prim kernels
                "@EXECUTORCH_CLIENTS",
            ],
            exported_deps = [
                ":kernel_runtime_context" + suffix,
                "//executorch/runtime/core/exec_aten:lib" + suffix,
                "//executorch/runtime/core/exec_aten/util:scalar_type_util" + suffix,
                "//executorch/runtime/core/exec_aten/util:tensor_util" + suffix,
            ],
        )