Commit 09ad20a

[minibench] Drop outliers from benchmark result (#8919)
Currently the result has large variance from outliers, so only use the middle 80% of samples (trimmean 0.2)
1 parent 2051a15 commit 09ad20a

File tree

1 file changed: +9 -1 lines changed
extension/benchmark/android/benchmark/app/src/main/java/org/pytorch/minibench/BenchmarkActivity.java

+9 -1
@@ -20,6 +20,7 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.List;
 import java.util.stream.Collectors;
 import org.pytorch.executorch.Module;
@@ -80,11 +81,18 @@ protected void onPostExecute(Void aVoid) {
     final List<BenchmarkMetric> results = new ArrayList<>();
     // The list of metrics we have atm includes:
     // Avg inference latency after N iterations
+    // Currently the result has large variance from outliers, so only use
+    // 80% samples in the middle (trimmean 0.2)
+    Collections.sort(stats.latency);
+    int resultSize = stats.latency.size();
+    List<Double> usedLatencyResults =
+        stats.latency.subList(resultSize / 10, resultSize * 9 / 10);
+
     results.add(
         new BenchmarkMetric(
             benchmarkModel,
             "avg_inference_latency(ms)",
-            stats.latency.stream().mapToDouble(l -> l).average().orElse(0.0f),
+            usedLatencyResults.stream().mapToDouble(l -> l).average().orElse(0.0f),
             0.0f));
     // Model load time
     results.add(
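For illustration, here is a minimal standalone sketch of the trimmed-mean calculation the patch applies, assuming the latencies are available as a plain List<Double>; the class and helper names (TrimmedMeanExample, trimmedMean) are hypothetical and not part of this commit.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class TrimmedMeanExample {
  // Sorts a copy of the samples, drops the lowest and highest 10%, and
  // averages the middle 80% (trimmean 0.2), mirroring the logic added to
  // BenchmarkActivity above.
  static double trimmedMean(List<Double> samples) {
    List<Double> sorted = new ArrayList<>(samples);
    Collections.sort(sorted);
    int n = sorted.size();
    return sorted.subList(n / 10, n * 9 / 10).stream()
        .mapToDouble(Double::doubleValue)
        .average()
        .orElse(0.0);
  }

  public static void main(String[] args) {
    // The single extreme outlier (500.0) falls outside the middle 80% and is
    // dropped before averaging.
    List<Double> latencies =
        Arrays.asList(10.0, 10.2, 10.5, 10.9, 11.0, 11.1, 11.5, 11.8, 12.0, 500.0);
    System.out.println(trimmedMean(latencies)); // prints 11.125
  }
}

Because subList uses integer division, a run with fewer than ten samples drops nothing from the bottom and at most one sample from the top, and a single-sample run yields an empty slice that falls back to the orElse default.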
