Skip to content

Commit 1e7d832

Browse files
Figure of Merit implementation (#3363)
Co-authored-by: Paul Romano <paul.k.romano@gmail.com>
1 parent 512df2f commit 1e7d832

File tree

3 files changed

+62
-1
lines changed

3 files changed

+62
-1
lines changed

docs/source/methods/tallies.rst

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -387,6 +387,33 @@ of this is that the longer you run a simulation, the better you know your
387387
results. Therefore, by running a simulation long enough, it is possible to
388388
reduce the stochastic uncertainty to arbitrarily low levels.
389389

390+
Figure of Merit
391+
+++++++++++++++
392+
393+
The figure of merit (FOM) is an indicator that accounts for both the statistical
394+
uncertainty and the execution time and represents how much information is
395+
obtained per unit time in the simulation. The FOM is defined as
396+
397+
.. math::
398+
:label: figure_of_merit
399+
400+
FOM = \frac{1}{r^2 t},
401+
402+
where :math:`t` is the total execution time and :math:`r` is the relative error
403+
defined as
404+
405+
.. math::
406+
:label: relative_error
407+
408+
r = \frac{s_{\bar{x}}}{\bar{x}}.
409+
410+
Based on this definition, one can see that a higher FOM is desirable. The FOM is
411+
useful as a comparative tool. For example, if a variance reduction technique is
412+
being applied to a simulation, the FOM with variance reduction can be compared
413+
to the FOM without variance reduction to ascertain whether the reduction in
414+
variance outweighs the potential increase in execution time (e.g., due to
415+
particle splitting).
416+
390417
Confidence Intervals
391418
++++++++++++++++++++
392419

openmc/tallies.py

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -95,6 +95,10 @@ class Tally(IDManagerMixin):
9595
An array containing the sample mean for each bin
9696
std_dev : numpy.ndarray
9797
An array containing the sample standard deviation for each bin
98+
figure_of_merit : numpy.ndarray
99+
An array containing the figure of merit for each bin
100+
101+
.. versionadded:: 0.15.3
98102
derived : bool
99103
Whether or not the tally is derived from one or more other tallies
100104
sparse : bool
@@ -127,6 +131,7 @@ def __init__(self, tally_id=None, name=''):
127131
self._sum_sq = None
128132
self._mean = None
129133
self._std_dev = None
134+
self._simulation_time = None
130135
self._with_batch_statistics = False
131136
self._derived = False
132137
self._sparse = False
@@ -385,6 +390,9 @@ def _read_results(self):
385390
self._sum = sps.lil_matrix(self._sum.flatten(), self._sum.shape)
386391
self._sum_sq = sps.lil_matrix(self._sum_sq.flatten(), self._sum_sq.shape)
387392

393+
# Read simulation time (needed for figure of merit)
394+
self._simulation_time = f["runtime"]["simulation"][()]
395+
388396
# Indicate that Tally results have been read
389397
self._results_read = True
390398

@@ -462,6 +470,16 @@ def std_dev(self):
462470
else:
463471
return self._std_dev
464472

473+
@property
474+
def figure_of_merit(self):
475+
mean = self.mean
476+
std_dev = self.std_dev
477+
fom = np.zeros_like(mean)
478+
nonzero = np.abs(mean) > 0
479+
fom[nonzero] = 1.0 / (
480+
(std_dev[nonzero] / mean[nonzero])**2 * self._simulation_time)
481+
return fom
482+
465483
@property
466484
def with_batch_statistics(self):
467485
return self._with_batch_statistics

tests/unit_tests/test_tallies.py

Lines changed: 17 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
import numpy as np
2-
2+
import pytest
33
import openmc
44

55

@@ -96,6 +96,22 @@ def test_tally_equivalence():
9696
assert tally_a == tally_b
9797

9898

99+
def test_figure_of_merit(sphere_model, run_in_tmpdir):
100+
# Run model with a few simple tally scores
101+
tally = openmc.Tally()
102+
tally.scores = ['total', 'absorption', 'scatter']
103+
sphere_model.tallies = [tally]
104+
sp_path = sphere_model.run(apply_tally_results=True)
105+
106+
# Get execution time and relative error
107+
with openmc.StatePoint(sp_path) as sp:
108+
time = sp.runtime['simulation']
109+
rel_err = tally.std_dev / tally.mean
110+
111+
# Check that figure of merit is calculated correctly
112+
assert tally.figure_of_merit == pytest.approx(1 / (rel_err**2 * time))
113+
114+
99115
def test_tally_application(sphere_model, run_in_tmpdir):
100116
# Create a tally with most possible gizmos
101117
tally = openmc.Tally(name='test tally')

0 commit comments

Comments
 (0)