The Python class
moarchiving.BiobjectiveNondominatedSortedList
implements a bi-objective non-dominated archive with list
as parent class. It is heavily based on the bisect
module. It provides easy and fast access to the overall hypervolume, the contributing hypervolume of each element, and to the uncrowded hypervolume improvement of any given point in objective space.
Either via
pip install git+https://github.com/CMA-ES/moarchiving.git@master
or simply via
pip install moarchiving
The single file moarchiving.py
(from the moarchiving/
folder) can also be directly used by itself when copied in the current folder or in a path visible to Python (e.g. a path contained in sys.path
).
moarchiving
uses the fractions.Fraction
type to avoid rounding errors when computing hypervolume differences, but its usage can also easily be switched off by assigning the respective class attribute.
moarchiving.BiobjectiveNondominatedSortedList
import doctest
import moarchiving
NA = moarchiving.BiobjectiveNondominatedSortedList
doctest.testmod(moarchiving.moarchiving)
# NA.make_expensive_asserts = True
import numpy as np
def nondom_arch(n):
    """Return a sorted list of ``n`` mutually nondominated 2D points.

    The points are built from an evenly spaced grid on ``[-1, 1]``:
    the first objective decreases in magnitude while the second
    increases, so no point dominates another.  Each element of the
    returned list is a ``[f1, f2]`` pair of floats.
    """
    grid = np.linspace(-1, 1, 2 * n)
    pairs = grid.reshape(2, n).T  # column 0 from [-1, 0), column 1 from (0, 1]
    return np.abs(pairs).tolist()
# nondom_arch(3)
import fractions
id_ = lambda x: x
rg = np.arange(0.1, 1000)
Fraction
%timeit [id_(i) for i in rg] # expect 120mics
%timeit [float(i) for i in rg] # expect 170mics
%timeit [fractions.Fraction(i) for i in rg] # expect 2.7ms
a = moarchiving.BiobjectiveNondominatedSortedList(
[[-0.749, -1.188], [-0.557, 1.1076],
[0.2454, 0.4724], [-1.146, -0.110]], [10, 10])
a._asserts()
a
>>> for i in range(len(a)):
... assert a.contributing_hypervolume(i) == a.contributing_hypervolumes[i]
>>> assert all(map(lambda x, y: x - 1e-9 < y < x + 1e-9,
... a.contributing_hypervolumes,
... [4.01367, 11.587422]))
a.dominators([1, 3]) == a
a.add([-1, -3]) # return index where the element was added
a
a.add([-1.5, 44])
a
a.dominates(a[0])
a.dominates([-1.2, 1])
a._asserts()
NA.merge = NA.add_list # merge disappeared
b = NA(a)
b.merge([[-1.2, 1]])
# print(b)
a.add_list([[-1.2, 1]])
# print(a)
assert b == a
a.merge(b)
assert b == a
r = np.random.randn(100_000, 2).tolist()
r2 = sorted(np.random.randn(200, 2).tolist())
assert NA(r).add_list(r2) == NA(r).merge(r2)
%%timeit a = NA(r) # expect 390mics
a.add_list(r2)
%%timeit a = NA(r) # expect 290mics
a.merge(r2)
%timeit nondom_arch(1_000)
%timeit nondom_arch(10_000)
%timeit nondom_arch(100_000) # just checking baseline, expect 16ms
%timeit NA(nondom_arch(1_000))
%timeit NA(nondom_arch(10_000)) # expect 10ms
%timeit NA(nondom_arch(100_000)) # expect 112ms, nondom_arch itself takes about 25%
randars = {} # prepare input lists
for n in [1_000, 10_000, 100_000]:
randars[n] = np.random.rand(n, 2).tolist()
len(NA(nondom_arch(100_000))), len(NA(randars[100_000]))
%timeit NA(randars[1_000])
%timeit NA(randars[10_000]) # expect 9ms
%timeit NA(randars[100_000]) # expect 180 ms
%timeit sorted(nondom_arch(1_000))
%timeit sorted(nondom_arch(10_000)) # expect 1.2ms
%timeit sorted(nondom_arch(100_000)) # expect 21ms
%timeit sorted(randars[1_000])
%timeit sorted(randars[10_000]) # expect 5ms
%timeit sorted(randars[100_000]) # expect 110ms
%timeit list(randars[1_000])
%timeit list(randars[10_000])
%timeit list(randars[100_000]) # expect 1ms
1 ms `list`
22 ms `sorted` on sorted list
130 ms `sorted` on unsorted list
110 ms archive on sorted nondominated list
190 ms archive on list which needs pruning (was 1300ms)
add
%%timeit a = NA(nondom_arch(1_000)) # expect 7.3ms
for i in range(1000):
a.add([ai - 1e-4 for ai in a[np.random.randint(len(a))]])
len(a)
%%timeit a = NA(nondom_arch(10_000)) # expect 7.7ms
for i in range(1000):
a.add([ai - 1e-4 for ai in a[np.random.randint(len(a))]])
len(a)
%%timeit a = NA(nondom_arch(100_000)) # expect 15ms
for i in range(1000):
a.add([ai - 1e-4 for ai in a[np.random.randint(len(a))]])
len(a) # deletion kicks in and makes it 20 times slower if implemented with pop
%%timeit a = NA(nondom_arch(100_000)) # expect 10ms
for i in range(1000):
a.add([ai - 1e-8 for ai in a[np.random.randint(len(a))]])
len(a) # no deletion has taken place
%%timeit a = NA(nondom_arch(1_000_000)) # expect 270ms
for i in range(1000):
a.add([ai - 1e-4 for ai in a[np.random.randint(len(a))]])
len(a) # deletion kicks in
%%timeit a = NA(nondom_arch(1_000_000)) # expect 12ms
for i in range(1000):
a.add([ai - 1e-8 for ai in a[np.random.randint(len(a))]])
len(a)
%timeit a = NA(nondom_arch(1_000), [5, 5]) # expect 28ms, takes 4 or 40x longer than without hypervolume computation
%timeit a = NA(nondom_arch(10_000), [5, 5]) # expect 300ms
%timeit a = NA(nondom_arch(100_000), [5, 5]) # expect 3s, takes 3x longer than without hypervolume computation
%%timeit a = NA(nondom_arch(1_000), [5, 5]) # expect 220ns
a.hypervolume
%%timeit a = NA(nondom_arch(10_000), [5, 5]) # expect 210ns
a.hypervolume
%%timeit a = NA(nondom_arch(100_000), [5, 5]) # expect 225ns
a.hypervolume
NA.hypervolume_computation_float_type = float
%timeit a = NA(nondom_arch(1_000), [5, 5]) # expect 11ms, takes 4 or 40x longer than without hypervolume computation
NA.hypervolume_final_float_type = float
NA.hypervolume_computation_float_type = float
%timeit a = NA(nondom_arch(1_000), [5, 5]) # expect 3.8ms, takes 4 or 40x longer than without hypervolume computation