Fast Lattice GP¶
In [1]:
Copied!
import fastgps
import torch
import numpy as np
import fastgps
import torch
import numpy as np
In [2]:
Copied!
# Use double precision throughout: GP covariance factorizations are
# numerically sensitive, so float32 would degrade the fits below.
torch.set_default_dtype(torch.float64)
torch.set_default_dtype(torch.float64)
True Function¶
In [3]:
Copied!
d = 2  # input dimension

def f(x):
    """True function f(x) = x1*sin(x0) + x0*cos(x1), evaluated row-wise on (n,2) inputs."""
    return x[:, 1] * torch.sin(x[:, 0]) + x[:, 0] * torch.cos(x[:, 1])

def f0(x):
    """Partial derivative of f with respect to x0."""
    return x[:, 1] * torch.cos(x[:, 0]) + torch.cos(x[:, 1])

def f1(x):
    """Partial derivative of f with respect to x1."""
    return torch.sin(x[:, 0]) - x[:, 0] * torch.sin(x[:, 1])

# derivative orders per task: plain value, d/dx0, d/dx1
derivatives = [torch.tensor(orders) for orders in ([0, 0], [1, 0], [0, 1])]

rng = torch.Generator().manual_seed(17)
x = torch.rand((2**7, d), generator=rng)  # random testing locations
y = torch.stack([f(x), f0(x), f1(x)], dim=0)  # true values at random testing locations
z = torch.rand((2**8, d), generator=rng)  # other random locations at which to evaluate covariance
print("x.shape = %s"%str(tuple(x.shape)))
print("y.shape = %s"%str(tuple(y.shape)))
print("z.shape = %s"%str(tuple(z.shape)))
# (rendered duplicate of the cell above)
d = 2
# f(x) = x1*sin(x0) + x0*cos(x1); f0 and f1 are its partial derivatives
# with respect to x0 and x1, matching the `derivatives` task list below.
f = lambda x: x[:,1]*torch.sin(x[:,0])+x[:,0]*torch.cos(x[:,1])
f0 = lambda x: x[:,1]*torch.cos(x[:,0])+torch.cos(x[:,1])
f1 = lambda x: torch.sin(x[:,0])-x[:,0]*torch.sin(x[:,1])
# derivative orders per task: plain value, d/dx0, d/dx1
derivatives = [
torch.tensor([0,0]),
torch.tensor([1,0]),
torch.tensor([0,1]),
]
rng = torch.Generator().manual_seed(17)
x = torch.rand((2**7,d),generator=rng) # random testing locations
y = torch.cat([f(x)[None,:],f0(x)[None,:],f1(x)[None,:]],dim=0) # true values at random testing locations
z = torch.rand((2**8,d),generator=rng) # other random locations at which to evaluate covariance
print("x.shape = %s"%str(tuple(x.shape)))
print("y.shape = %s"%str(tuple(y.shape)))
print("z.shape = %s"%str(tuple(z.shape)))
x.shape = (128, 2) y.shape = (3, 128) z.shape = (256, 2)
Construct Fast GP¶
In [4]:
Copied!
# Multi-task lattice GP: one task per entry of `derivatives`
# (value, d/dx0, d/dx1), with kernel smoothness alpha=2.
fgp = fastgps.FastGPLattice(d,seed_for_seq=7,num_tasks=len(derivatives),derivatives=derivatives,alpha=2)
# each task may use a different number of lattice points
x_next = fgp.get_x_next(n=[2**6,2**3,2**8])
# evaluate the matching derivative of the true function for each task
y_next = [f(x_next[0]),f0(x_next[1]),f1(x_next[2])]
fgp.add_y_next(y_next)
assert len(x_next)==len(y_next)
for i in range(len(x_next)):
    # NOTE(review): loop-body indentation restored; it had been stripped by the export
    print("i = %d"%i)
    print("\tx_next[%d].shape = %s"%(i,str(tuple(x_next[i].shape))))
    print("\ty_next[%d].shape = %s"%(i,str(tuple(y_next[i].shape))))
# (rendered duplicate of the cell above)
fgp = fastgps.FastGPLattice(d,seed_for_seq=7,num_tasks=len(derivatives),derivatives=derivatives,alpha=2)
x_next = fgp.get_x_next(n=[2**6,2**3,2**8])
y_next = [f(x_next[0]),f0(x_next[1]),f1(x_next[2])]
fgp.add_y_next(y_next)
assert len(x_next)==len(y_next)
for i in range(len(x_next)):
    # NOTE(review): loop-body indentation restored; it had been stripped by the export
    print("i = %d"%i)
    print("\tx_next[%d].shape = %s"%(i,str(tuple(x_next[i].shape))))
    print("\ty_next[%d].shape = %s"%(i,str(tuple(y_next[i].shape))))
i = 0 x_next[0].shape = (64, 2) y_next[0].shape = (64,) i = 1 x_next[1].shape = (8, 2) y_next[1].shape = (8,) i = 2 x_next[2].shape = (256, 2) y_next[2].shape = (256,)
In [5]:
Copied!
# Posterior mean at the test points for all three tasks, BEFORE fitting
# hyperparameters — the large relative errors printed below are expected.
pmean = fgp.post_mean(x)
print("pmean.shape = %s"%str(tuple(pmean.shape)))
print("l2 relative error =",(torch.linalg.norm(y-pmean,dim=1)/torch.linalg.norm(y,dim=1)))
# (rendered duplicate of the cell above)
pmean = fgp.post_mean(x)
print("pmean.shape = %s"%str(tuple(pmean.shape)))
print("l2 relative error =",(torch.linalg.norm(y-pmean,dim=1)/torch.linalg.norm(y,dim=1)))
pmean.shape = (3, 128) l2 relative error = tensor([0.2317, 4.4932, 4.7753])
In [6]:
Copied!
# Optimize the GP hyperparameters; returns a dict of tracked quantities
# (the printed table below shows the loss trajectory).
data = fgp.fit()
list(data.keys())
# (rendered duplicate of the cell above)
data = fgp.fit()
list(data.keys())
iter of 5.0e+03 | loss | term1 | term2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 0.00e+00 | 7.68e+02 | 3.51e+01 | 8.98e+02 5.00e+00 | 5.49e+02 | 2.58e+02 | 2.37e+02 1.00e+01 | 5.42e+02 | 2.87e+02 | 1.94e+02 1.50e+01 | 5.22e+02 | 3.19e+02 | 1.21e+02 2.00e+01 | 4.79e+02 | 2.99e+02 | 5.73e+01 2.50e+01 | 4.07e+02 | 3.01e+02 | -9.08e+01 3.00e+01 | 2.37e+02 | 4.59e+02 | -5.88e+02 3.50e+01 | 2.11e+02 | 3.09e+02 | -4.89e+02 4.00e+01 | 2.12e+02 | 3.01e+02 | -4.80e+02 4.50e+01 | 2.11e+02 | 3.20e+02 | -5.01e+02 5.00e+01 | 2.11e+02 | 3.21e+02 | -5.01e+02 5.50e+01 | 2.11e+02 | 3.27e+02 | -5.08e+02 6.00e+01 | 2.11e+02 | 3.33e+02 | -5.14e+02 6.40e+01 | 2.11e+02 | 3.27e+02 | -5.08e+02
Out[6]:
['iterations']
In [7]:
Copied!
# Pointwise 99% credible intervals at the test locations, plus full
# posterior covariance between sets of points.
pmean,pvar,q,ci_low,ci_high = fgp.post_ci(x,confidence=0.99)
print("pmean.shape = %s"%str(tuple(pmean.shape)))
print("pvar.shape = %s"%str(tuple(pvar.shape)))
print("q = %.2f"%q)
print("ci_low.shape = %s"%str(tuple(ci_low.shape)))
print("ci_high.shape = %s"%str(tuple(ci_high.shape)))
print("l2 relative error =",(torch.linalg.norm(y-pmean,dim=1)/torch.linalg.norm(y,dim=1)))
pcov = fgp.post_cov(x,x)
print("pcov.shape = %s"%str(tuple(pcov.shape)))
# sanity check: the (task,task,point,point) diagonal of post_cov(x,x)
# must equal post_var(x)'s values and be non-negative
_range0,_rangen1 = torch.arange(pcov.size(0)),torch.arange(pcov.size(-1))
assert torch.allclose(pcov[_range0,_range0][:,_rangen1,_rangen1],pvar) and (pvar>=0).all()
pcov2 = fgp.post_cov(x,z)
print("pcov2.shape = %s"%str(tuple(pcov2.shape)))
# (rendered duplicate of the cell above)
pmean,pvar,q,ci_low,ci_high = fgp.post_ci(x,confidence=0.99)
print("pmean.shape = %s"%str(tuple(pmean.shape)))
print("pvar.shape = %s"%str(tuple(pvar.shape)))
print("q = %.2f"%q)
print("ci_low.shape = %s"%str(tuple(ci_low.shape)))
print("ci_high.shape = %s"%str(tuple(ci_high.shape)))
print("l2 relative error =",(torch.linalg.norm(y-pmean,dim=1)/torch.linalg.norm(y,dim=1)))
pcov = fgp.post_cov(x,x)
print("pcov.shape = %s"%str(tuple(pcov.shape)))
_range0,_rangen1 = torch.arange(pcov.size(0)),torch.arange(pcov.size(-1))
assert torch.allclose(pcov[_range0,_range0][:,_rangen1,_rangen1],pvar) and (pvar>=0).all()
pcov2 = fgp.post_cov(x,z)
print("pcov2.shape = %s"%str(tuple(pcov2.shape)))
pmean.shape = (3, 128) pvar.shape = (3, 128) q = 2.58 ci_low.shape = (3, 128) ci_high.shape = (3, 128) l2 relative error = tensor([ 0.1363, 12.1602, 1.0216])
pcov.shape = (3, 3, 128, 128) pcov2.shape = (3, 3, 128, 256)
In [8]:
Copied!
# 99% credible interval for the posterior cubature (integral estimate)
# of each task.
pcmean,pcvar,q,cci_low,cci_high = fgp.post_cubature_ci(confidence=0.99)
print("pcmean =",pcmean)
print("pcvar =",pcvar)
print("cci_low =",cci_low)
print("cci_high =",cci_high)  # label fixed: was missing " =" unlike the sibling prints
# (rendered duplicate of the cell above)
pcmean,pcvar,q,cci_low,cci_high = fgp.post_cubature_ci(confidence=0.99)
print("pcmean =",pcmean)
print("pcvar =",pcvar)
print("cci_low =",cci_low)
print("cci_high =",cci_high)  # label fixed: was missing " =" unlike the sibling prints
pcmean = tensor([240.3550, 240.3550, 240.3550]) pcvar = tensor([0., 0., 0.]) cci_low = tensor([240.3550, 240.3550, 240.3550]) cci_high tensor([240.3550, 240.3550, 240.3550])
Project and Increase Sample Size¶
In [9]:
Copied!
# Project posterior (co)variances at larger per-task sample sizes n_new
# WITHOUT needing the new function evaluations yet.
n_new = fgp.n*torch.tensor([4,2,8])
pcov_future = fgp.post_cov(x,z,n=n_new)
pvar_future = fgp.post_var(x,n=n_new)
pcvar_future = fgp.post_cubature_var(n=n_new)
# (rendered duplicate of the cell above)
n_new = fgp.n*torch.tensor([4,2,8])
pcov_future = fgp.post_cov(x,z,n=n_new)
pvar_future = fgp.post_var(x,n=n_new)
pcvar_future = fgp.post_cubature_var(n=n_new)
In [10]:
Copied!
# Actually draw the additional points and add their function values.
x_next = fgp.get_x_next(n_new)
y_next = [f(x_next[0]),f0(x_next[1]),f1(x_next[2])]
for _y in y_next:
    # NOTE(review): loop-body indentation restored; it had been stripped by the export
    print(_y.shape)
fgp.add_y_next(y_next)
print("l2 relative error =",(torch.linalg.norm(y-fgp.post_mean(x),dim=1)/torch.linalg.norm(y,dim=1)))
# the posterior (co)variances after adding data must match the
# projections computed earlier from n=n_new alone
assert torch.allclose(fgp.post_cov(x,z),pcov_future)
assert torch.allclose(fgp.post_var(x),pvar_future)
assert torch.allclose(fgp.post_cubature_var(),pcvar_future)
# (rendered duplicate of the cell above)
x_next = fgp.get_x_next(n_new)
y_next = [f(x_next[0]),f0(x_next[1]),f1(x_next[2])]
for _y in y_next:
    # NOTE(review): loop-body indentation restored; it had been stripped by the export
    print(_y.shape)
fgp.add_y_next(y_next)
print("l2 relative error =",(torch.linalg.norm(y-fgp.post_mean(x),dim=1)/torch.linalg.norm(y,dim=1)))
assert torch.allclose(fgp.post_cov(x,z),pcov_future)
assert torch.allclose(fgp.post_var(x),pvar_future)
assert torch.allclose(fgp.post_cubature_var(),pcvar_future)
torch.Size([192]) torch.Size([8]) torch.Size([1792]) l2 relative error = tensor([ 0.1199, 23.1675, 1.0979])
In [11]:
Copied!
# Re-fit hyperparameters after the sample-size increase (silently).
data = fgp.fit(verbose=False)
print("l2 relative error =",(torch.linalg.norm(y-fgp.post_mean(x),dim=1)/torch.linalg.norm(y,dim=1)))
# (rendered duplicate of the cell above)
data = fgp.fit(verbose=False)
print("l2 relative error =",(torch.linalg.norm(y-fgp.post_mean(x),dim=1)/torch.linalg.norm(y,dim=1)))
l2 relative error = tensor([ 0.1294, 27.2263, 1.0124])
In [12]:
Copied!
# Repeat the project-then-sample cycle with different per-task growth
# factors, verifying the projections again after the data is added.
n_new = fgp.n*torch.tensor([4,8,2])
pcov_new = fgp.post_cov(x,z,n=n_new)
pvar_new = fgp.post_var(x,n=n_new)
pcvar_new = fgp.post_cubature_var(n=n_new)
x_next = fgp.get_x_next(n_new)
y_next = [f(x_next[0]),f0(x_next[1]),f1(x_next[2])]
fgp.add_y_next(y_next)
assert torch.allclose(fgp.post_cov(x,z),pcov_new)
assert torch.allclose(fgp.post_var(x),pvar_new)
assert torch.allclose(fgp.post_cubature_var(),pcvar_new)
# (rendered duplicate of the cell above)
n_new = fgp.n*torch.tensor([4,8,2])
pcov_new = fgp.post_cov(x,z,n=n_new)
pvar_new = fgp.post_var(x,n=n_new)
pcvar_new = fgp.post_cubature_var(n=n_new)
x_next = fgp.get_x_next(n_new)
y_next = [f(x_next[0]),f0(x_next[1]),f1(x_next[2])]
fgp.add_y_next(y_next)
assert torch.allclose(fgp.post_cov(x,z),pcov_new)
assert torch.allclose(fgp.post_var(x),pvar_new)
assert torch.allclose(fgp.post_cubature_var(),pcvar_new)
In [ ]:
Copied!