Standard GP¶
In [1]:
Copied!
import fastgps
import torch
import numpy as np
import fastgps
import torch
import numpy as np
In [2]:
Copied!
# Use float64 everywhere; GP covariance solves are numerically sensitive in float32.
# (The line appears twice because the docs exporter duplicates each cell — "Copied!" artifact.)
torch.set_default_dtype(torch.float64)
torch.set_default_dtype(torch.float64)
True Function¶
In [3]:
Copied!
def f_ackley(x, a=20, b=0.2, c=2*np.pi, scaling=32.768):
    """Ackley benchmark function evaluated row-wise.

    Points in the unit cube [0,1]^d are affinely mapped to
    [-scaling, scaling]^d before evaluation, so the global minimum of 0
    is attained at x = 0.5 in every coordinate.
    Reference: https://www.sfu.ca/~ssurjano/ackley.html

    Args:
        x: (n, d) tensor of points in [0,1]^d.
        a, b, c: standard Ackley shape parameters.
        scaling: half-width of the evaluation box.

    Returns:
        (n,) tensor of Ackley function values (nonnegative).
    """
    assert x.ndim == 2
    pts = scaling*(2*x - 1)
    rms_term = a*torch.exp(-b*torch.sqrt((pts**2).mean(1)))
    cos_term = torch.exp(torch.cos(c*pts).mean(1))
    return (a + np.exp(1)) - rms_term - cos_term
# Generate fixed test data with a seeded generator for reproducibility.
# Note: x and z share the generator, so the draw order below is part of the result.
d = 1 # dimension
rng = torch.Generator().manual_seed(17)
x = torch.rand((2**7,d),generator=rng) # random testing locations
y = f_ackley(x) # true values at random testing locations
z = torch.rand((2**8,d),generator=rng) # other random locations at which to evaluate covariance
print("x.shape = %s"%str(tuple(x.shape)))
print("y.shape = %s"%str(tuple(y.shape)))
print("z.shape = %s"%str(tuple(z.shape)))
def f_ackley(x, a=20, b=0.2, c=2*np.pi, scaling=32.768):
    """Vectorized Ackley test function.

    Each row of ``x`` is mapped from [0,1]^d to [-scaling, scaling]^d and the
    Ackley value is returned per row; the minimum value 0 occurs where every
    coordinate equals 0.5. See https://www.sfu.ca/~ssurjano/ackley.html
    """
    assert x.ndim == 2
    u = 2*scaling*x - scaling
    mean_sq = torch.mean(u*u, dim=1)
    mean_cos = torch.mean(torch.cos(c*u), dim=1)
    return a + np.exp(1) - a*torch.exp(-b*torch.sqrt(mean_sq)) - torch.exp(mean_cos)
# (Duplicate of the cell above — the docs exporter repeats each cell's source.)
# x and z consume the same seeded generator, so draw order is significant.
d = 1 # dimension
rng = torch.Generator().manual_seed(17)
x = torch.rand((2**7,d),generator=rng) # random testing locations
y = f_ackley(x) # true values at random testing locations
z = torch.rand((2**8,d),generator=rng) # other random locations at which to evaluate covariance
print("x.shape = %s"%str(tuple(x.shape)))
print("y.shape = %s"%str(tuple(y.shape)))
print("z.shape = %s"%str(tuple(z.shape)))
x.shape = (128, 1) y.shape = (128,) z.shape = (256, 1)
Construct GP¶
In [4]:
Copied!
# Construct a StandardGP; seed_for_seq presumably fixes the GP's sampling
# sequence for reproducibility — confirm against fastgps docs.
fgp = fastgps.StandardGP(d,seed_for_seq=7)
x_next = fgp.get_x_next(2**6) # next 2^6 sampling locations from the GP's point sequence
y_next = f_ackley(x_next) # evaluate the true function at those locations
fgp.add_y_next(y_next) # register the observations with the GP (stateful)
print("x_next.shape = %s"%str(tuple(x_next.shape)))
print("y_next.shape = %s"%str(tuple(y_next.shape)))
# (Duplicate copy of the same cell source, emitted by the docs exporter.)
fgp = fastgps.StandardGP(d,seed_for_seq=7)
x_next = fgp.get_x_next(2**6)
y_next = f_ackley(x_next)
fgp.add_y_next(y_next)
print("x_next.shape = %s"%str(tuple(x_next.shape)))
print("y_next.shape = %s"%str(tuple(y_next.shape)))
x_next.shape = (64, 1) y_next.shape = (64,)
In [5]:
Copied!
# Posterior mean at the held-out locations, before any hyperparameter fitting;
# the relative error printed below is therefore the "untuned" baseline.
pmean = fgp.post_mean(x)
print("pmean.shape = %s"%str(tuple(pmean.shape)))
print("l2 relative error = %.2e"%(torch.linalg.norm(y-pmean)/torch.linalg.norm(y)))
# (Duplicate copy of the same cell source, emitted by the docs exporter.)
pmean = fgp.post_mean(x)
print("pmean.shape = %s"%str(tuple(pmean.shape)))
print("l2 relative error = %.2e"%(torch.linalg.norm(y-pmean)/torch.linalg.norm(y)))
pmean.shape = (128,) l2 relative error = 1.70e-01
In [6]:
Copied!
# Optimize GP hyperparameters; judging by the printed loss table this minimizes
# a two-term marginal-likelihood-style objective — confirm in fastgps docs.
# fit() returns a dict of optimization diagnostics.
data = fgp.fit()
list(data.keys())
# (Duplicate copy of the same cell source, emitted by the docs exporter.)
data = fgp.fit()
list(data.keys())
iter of 5.0e+03 | loss | term1 | term2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 0.00e+00 | 3.19e+06 | 6.38e+06 | -5.54e+02 5.00e+00 | 2.29e+06 | 4.57e+06 | -5.44e+02 1.00e+01 | 7.51e+05 | 1.50e+06 | -4.97e+02 1.50e+01 | 1.98e+03 | 3.62e+03 | 2.31e+02 2.00e+01 | 1.15e+04 | 2.27e+04 | 1.79e+02 2.50e+01 | 2.86e+02 | 1.48e+02 | 3.07e+02 3.00e+01 | 2.44e+02 | 1.74e+01 | 3.53e+02 3.50e+01 | 2.27e+02 | 2.64e+01 | 3.10e+02 4.00e+01 | 2.16e+02 | 7.85e+01 | 2.35e+02 4.50e+01 | 2.14e+02 | 6.94e+01 | 2.41e+02 5.00e+01 | 2.14e+02 | 6.56e+01 | 2.44e+02 5.50e+01 | 2.14e+02 | 6.37e+01 | 2.46e+02 6.00e+01 | 2.14e+02 | 6.38e+01 | 2.46e+02 6.10e+01 | 2.14e+02 | 6.38e+01 | 2.46e+02
Out[6]:
['iterations']
In [7]:
Copied!
# Pointwise 99% credible intervals: post_ci returns the posterior mean,
# posterior variance, the Gaussian quantile q (2.58 for 99%), and the CI bounds.
pmean,pvar,q,ci_low,ci_high = fgp.post_ci(x,confidence=0.99)
print("pmean.shape = %s"%str(tuple(pmean.shape)))
print("pvar.shape = %s"%str(tuple(pvar.shape)))
print("q = %.2f"%q)
print("ci_low.shape = %s"%str(tuple(ci_low.shape)))
print("ci_high.shape = %s"%str(tuple(ci_high.shape)))
print("l2 relative error = %.2e"%(torch.linalg.norm(y-pmean)/torch.linalg.norm(y)))
# Full posterior covariance; its diagonal must match post_ci's variances,
# and variances must be nonnegative.
pcov = fgp.post_cov(x,x)
print("pcov.shape = %s"%str(tuple(pcov.shape)))
assert torch.allclose(pcov.diagonal(),pvar) and (pvar>=0).all()
# Cross-covariance between the two location sets x (128) and z (256).
pcov2 = fgp.post_cov(x,z)
print("pcov2.shape = %s"%str(tuple(pcov2.shape)))
# (Duplicate copy of the same cell source, emitted by the docs exporter.)
pmean,pvar,q,ci_low,ci_high = fgp.post_ci(x,confidence=0.99)
print("pmean.shape = %s"%str(tuple(pmean.shape)))
print("pvar.shape = %s"%str(tuple(pvar.shape)))
print("q = %.2f"%q)
print("ci_low.shape = %s"%str(tuple(ci_low.shape)))
print("ci_high.shape = %s"%str(tuple(ci_high.shape)))
print("l2 relative error = %.2e"%(torch.linalg.norm(y-pmean)/torch.linalg.norm(y)))
pcov = fgp.post_cov(x,x)
print("pcov.shape = %s"%str(tuple(pcov.shape)))
assert torch.allclose(pcov.diagonal(),pvar) and (pvar>=0).all()
pcov2 = fgp.post_cov(x,z)
print("pcov2.shape = %s"%str(tuple(pcov2.shape)))
pmean.shape = (128,) pvar.shape = (128,) q = 2.58 ci_low.shape = (128,) ci_high.shape = (128,) l2 relative error = 8.29e-02 pcov.shape = (128, 128) pcov2.shape = (128, 256)
In [8]:
Copied!
# Credible interval for the integral of the posterior over the unit cube
# (cubature): scalar mean, variance, quantile, and CI bounds.
pcmean,pcvar,q,cci_low,cci_high = fgp.post_cubature_ci(confidence=0.99)
print("pcmean = %.3e"%pcmean)
print("pcvar = %.3e"%pcvar)
print("cci_low = %.3e"%cci_low)
print("cci_high = %.3e"%cci_high)
# (Duplicate copy of the same cell source, emitted by the docs exporter.)
pcmean,pcvar,q,cci_low,cci_high = fgp.post_cubature_ci(confidence=0.99)
print("pcmean = %.3e"%pcmean)
print("pcvar = %.3e"%pcvar)
print("cci_low = %.3e"%cci_low)
print("cci_high = %.3e"%cci_high)
pcmean = 1.846e+01 pcvar = 1.564e-03 cci_low = 1.836e+01 cci_high = 1.856e+01
Project and Increase Sample Size¶
In [9]:
Copied!
# Project posterior (co)variances to a doubled sample size n=2*fgp.n BEFORE
# acquiring the data — possible because GP posterior covariance depends only on
# the sampling locations, not the observed y values (the asserts in the next
# cell verify the projections match the post-acquisition values).
pcov_future = fgp.post_cov(x,z,n=2*fgp.n)
pvar_future = fgp.post_var(x,n=2*fgp.n)
pcvar_future = fgp.post_cubature_var(n=2*fgp.n)
# (Duplicate copy of the same cell source, emitted by the docs exporter.)
pcov_future = fgp.post_cov(x,z,n=2*fgp.n)
pvar_future = fgp.post_var(x,n=2*fgp.n)
pcvar_future = fgp.post_cubature_var(n=2*fgp.n)
In [10]:
Copied!
# Double the sample size: get_x_next(2*fgp.n) presumably returns only the NEW
# points needed to reach the target total — confirm in fastgps docs.
x_next = fgp.get_x_next(2*fgp.n)
y_next = f_ackley(x_next)
fgp.add_y_next(y_next)
print("l2 relative error = %.2e"%(torch.linalg.norm(y-fgp.post_mean(x))/torch.linalg.norm(y)))
# The earlier projected quantities must agree with the actual post-update values.
assert torch.allclose(fgp.post_cov(x,z),pcov_future)
assert torch.allclose(fgp.post_var(x),pvar_future)
assert torch.allclose(fgp.post_cubature_var(),pcvar_future)
# (Duplicate copy of the same cell source, emitted by the docs exporter.)
x_next = fgp.get_x_next(2*fgp.n)
y_next = f_ackley(x_next)
fgp.add_y_next(y_next)
print("l2 relative error = %.2e"%(torch.linalg.norm(y-fgp.post_mean(x))/torch.linalg.norm(y)))
assert torch.allclose(fgp.post_cov(x,z),pcov_future)
assert torch.allclose(fgp.post_var(x),pvar_future)
assert torch.allclose(fgp.post_cubature_var(),pcvar_future)
l2 relative error = 1.44e-01
In [11]:
Copied!
# Re-optimize the hyperparameters on the enlarged data set, silently;
# the relative error should improve versus the pre-refit value.
data = fgp.fit(verbose=False)
print("l2 relative error = %.2e"%(torch.linalg.norm(y-fgp.post_mean(x))/torch.linalg.norm(y)))
# (Duplicate copy of the same cell source, emitted by the docs exporter.)
data = fgp.fit(verbose=False)
print("l2 relative error = %.2e"%(torch.linalg.norm(y-fgp.post_mean(x))/torch.linalg.norm(y)))
l2 relative error = 6.82e-02
In [12]:
Copied!
# Same projection check with a 16x sample-size jump: project the posterior
# (co)variances first, then acquire the data and verify the projections hold.
pcov_16n = fgp.post_cov(x,z,n=16*fgp.n)
pvar_16n = fgp.post_var(x,n=16*fgp.n)
pcvar_16n = fgp.post_cubature_var(n=16*fgp.n)
x_next = fgp.get_x_next(16*fgp.n)
y_next = f_ackley(x_next)
fgp.add_y_next(y_next)
assert torch.allclose(fgp.post_cov(x,z),pcov_16n)
assert torch.allclose(fgp.post_var(x),pvar_16n)
assert torch.allclose(fgp.post_cubature_var(),pcvar_16n)
# (Duplicate copy of the same cell source, emitted by the docs exporter.)
pcov_16n = fgp.post_cov(x,z,n=16*fgp.n)
pvar_16n = fgp.post_var(x,n=16*fgp.n)
pcvar_16n = fgp.post_cubature_var(n=16*fgp.n)
x_next = fgp.get_x_next(16*fgp.n)
y_next = f_ackley(x_next)
fgp.add_y_next(y_next)
assert torch.allclose(fgp.post_cov(x,z),pcov_16n)
assert torch.allclose(fgp.post_var(x),pvar_16n)
assert torch.allclose(fgp.post_cubature_var(),pcvar_16n)
In [ ]:
Copied!