# Course 02975: Introduction to uncertainty quantification for inverse problems
## Installation (extra)
FEniCS on Colab:
```python
# Install FEniCS via FEM on Colab, but only if dolfin is not already available
try:
    import dolfin
except ImportError:
    !wget "https://fem-on-colab.github.io/releases/fenics-install-real.sh" -O "/tmp/fenics-install.sh" && bash "/tmp/fenics-install.sh"
    import dolfin
```
## Chapter 1 code solutions:
### Solutions for Chapter 1: Probably the simplest BIP in the world, The data distribution (on multiplicative noise)
**Exercise link:** https://cuqi-dtu.github.io/CUQI-Book/chapter01/Intro_example.html#id3
##### Question 4: part (What is the corresponding data distribution?)
$$
\begin{align}
b &= Ax + (Ax)e, \quad e\sim \text{Gaussian}(0, \sigma^2) \\
b &= Ax(1+e) \\
b &= (Ax)\,e_2, \quad e_2\sim \text{Gaussian}(1, \sigma^2)
\end{align}
$$
By the linear-transformation property of the normal distribution (see the section [Operations on a single normal variable](https://en.wikipedia.org/wiki/Normal_distribution) of the Wikipedia article), and noting that $Ax$ is constant relative to the random variables $b$ and $e_2$, we get
$$
b \sim \text{Gaussian}(Ax, \sigma^2(Ax)^2)
$$
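As a quick sanity check, we can verify this empirically; a minimal sketch, where the scalar `Ax_val` and `sigma` are illustrative stand-ins rather than the notebook's objects:
```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
Ax_val = 2.0   # stand-in for the fixed value of A@x
sigma = 0.3    # stand-in noise level

# Draw samples of b = Ax*(1 + e) with e ~ Gaussian(0, sigma^2)
b_samples = Ax_val * (1 + sigma * rng.standard_normal(100_000))

# Compare with the derived Gaussian(Ax, sigma^2*(Ax)^2)
print(b_samples.mean(), Ax_val)               # both close to 2.0
print(b_samples.var(), sigma**2 * Ax_val**2)  # both close to 0.36
# KS test against the derived distribution; a small p-value would signal a mismatch
print(stats.kstest(b_samples, stats.norm(loc=Ax_val, scale=sigma*Ax_val).cdf).pvalue)
```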
##### Question 4: part (Define the data distribution in CUQIpy ...)
```python
# The lambda makes the covariance a function of x, so b_mult is a conditional
# distribution whose noise level scales with the signal A@x
b_mult = Gaussian(A@x, lambda x: 0.1*(A@x)**2)
# Condition on a particular value of x to obtain a concrete distribution
b_mult_given_particular_x = b_mult(x=particular_x)
```
##### Question 4: part (Plot the PDF of the data distribution ...)
```python
plot_pdf_1D(b_mult_given_particular_x, -1.5, 6.5)
# note: also plotting b from the additive-noise case for comparison
plot_pdf_1D(b_given_particular_x, -1.5, 6.5)
plt.legend(['b_mult_given_particular_x', 'b_additive_given_particular_x'])
```
## Chapter 2 solutions:
### Section: Prior
##### Subsection: Multivariate Gaussian distribution
```python
# 1. Read the documentation of plot_ci, then plot a 90% credible interval
help(y_samples.plot_ci)
y_samples.plot_ci(90)
# 2. Add the exact mean for comparison
y_samples.plot_ci(90, exact=y.mean)
# 3. Histogram of the chain of the first component
plt.figure()
y_samples.hist_chain([0], bins=50, density=True)
```
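A note on what `plot_ci` shows (assuming CUQIpy's percentile-based intervals): for a credibility level of $\beta$ percent, the band is bounded by the empirical quantiles
$$
\left[\, q_{(100-\beta)/2},\;\, q_{(100+\beta)/2} \,\right],
$$
so `plot_ci(90)` plots the band between the 5% and 95% sample quantiles, together with the sample mean.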
##### Multivariate Gaussian distribution: compute the entropy of the distribution numerically
```python
# 1. Differential entropy h = -E[log p] of a Gaussian, by numerical quadrature
y_ent = Gaussian(0, 0.1)
y_ent_entropy = -1*sp_integrate.quad(lambda val: entropy_integrand(y_ent, val),
                                     -np.inf, np.inf)[0]
print(y_ent_entropy)
# 2. Same for a Uniform(-3, 3); integrate over its support only
z_ent = Uniform(-3, 3)
z_ent_entropy = -1*sp_integrate.quad(lambda val: entropy_integrand(z_ent, val),
                                     -3, 3)[0]
print(z_ent_entropy)
```
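The numerical values can be checked against the closed-form differential entropies of the two distributions:
$$
h\big(\text{Gaussian}(0, \sigma^2)\big) = \tfrac{1}{2}\ln(2\pi e \sigma^2) \approx 0.268 \;\text{ for } \sigma^2 = 0.1,
\qquad
h\big(\text{Uniform}(a, b)\big) = \ln(b-a) = \ln 6 \approx 1.792.
$$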
##### Subsection: Markov Random Fields in CUQIpy
```python
# 1. Plot a single realization of the GMRF prior
x_GMRF.sample().plot()
# 2. Compare a GMRF realization with an i.i.d. Gaussian realization
plt.figure()
x_Gaussian = Gaussian(mean=np.zeros(n), prec=50)
x_GMRF.sample().plot()
x_Gaussian.sample().plot()
# 3. Mean and variance of the difference between neighboring components 30 and 31
x_GMRF_samples = x_GMRF.sample(100000)
diff_30_31_samples = Samples(
    (x_GMRF_samples.samples[31] - x_GMRF_samples.samples[30]).reshape(1, -1))
print(diff_30_31_samples.mean())
print(diff_30_31_samples.variance())
```
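As a sanity check on item 3 (a sketch, assuming `x_GMRF` is a first-order zero-boundary GMRF with precision parameter $\delta$): the GMRF density penalizes squared differences of neighboring components,
$$
\pi(x) \propto \exp\left( -\frac{\delta}{2} \sum_{i} (x_{i+1}-x_i)^2 \right),
$$
so the difference of two neighboring components should have mean close to $0$ and variance close to $1/\delta$, which the empirical statistics above can be compared against.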
### Section: Gibbs
##### Subsection: Hierarchical Bayesian model
```python
# Condition the prior on d=100, then draw and plot 1000 samples
x_samples = x(d=100).sample(1000)
x_samples.plot()
```
##### Subsection: Posterior distribution
```python
# 1. Evaluate the joint log-density at a sample of x and fixed hyperparameters
joint.logd(y=y_data, x=x(d=1000).sample(), d=1000, s=1)
# 2. Condition the posterior on x and d; print the resulting distribution
marginal_post = posterior(x=x(d=1000).sample(), d=1000)
print(marginal_post)
# 3. Solve the problem with fixed hyperparameters instead of hyperpriors
x2 = GMRF(np.zeros(n), prec=1)
y2 = Gaussian(A@x2, prec=10000)
BP2 = BayesianProblem(x2, y2)
BP2.set_data(y2=y_data)
BP2.UQ(exact=probinfo.exactSolution)
```
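For orientation (assuming the notebook's hierarchical model, with hyperpriors on the prior parameter $d$ and the noise parameter $s$), the joint distribution represented by `joint` factorizes as
$$
\pi(y, x, d, s) = \pi(y \mid x, s)\, \pi(x \mid d)\, \pi(d)\, \pi(s),
$$
and conditioning on some of the variables, as in item 2, reduces the posterior $\pi(x, d, s \mid y)$ to a distribution over the remaining ones.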
##### Subsection: Connection to BayesianProblem class
```python
# Closed-form Gaussian posterior: combine prior and observation precisions
sigma_post = np.linalg.inv(np.linalg.inv(cov1) + np.linalg.inv(cov_obs))
print("sigma_post: ", sigma_post)
mu_post = sigma_post @ (np.linalg.inv(cov_obs) @ np.array([0.1, 0.2]) + np.linalg.inv(cov1) @ np.array([0, 0]))
x3 = Gaussian(mu_post, sigma_post)
samples_x3 = x3.sample(10000)
# Overlay samples from the analytic posterior on the computed posterior samples
samples_post.plot_pair()
samples_x3.plot_pair(ax=plt.gca())
```
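The code above implements the standard conjugate-Gaussian update: with prior $x \sim \text{Gaussian}(\mu_0, \Sigma_0)$ and data $y \sim \text{Gaussian}(x, \Sigma_{\text{obs}})$ (here $\mu_0 = (0,0)^\top$ with covariance `cov1`, and $y = (0.1, 0.2)^\top$ with covariance `cov_obs`), the posterior is Gaussian with
$$
\Sigma_{\text{post}} = \left(\Sigma_0^{-1} + \Sigma_{\text{obs}}^{-1}\right)^{-1},
\qquad
\mu_{\text{post}} = \Sigma_{\text{post}} \left( \Sigma_{\text{obs}}^{-1}\, y + \Sigma_0^{-1}\, \mu_0 \right).
$$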
## Chapter 3 solutions:
### First notebook
#### Poisson target 1
```python
# Plot the true solution in parameter (expansion coefficient) space ...
x_true.plot(plot_par=True)
plt.figure()
# ... and in function space
x_true.plot(plot_par=False)
```
#### Poisson target 2
```python
# Full UQ pipeline: sample the posterior and plot the result against x_true
BP_poisson.UQ(exact=x_true)
```
#### Poisson target 3
```python
# Evaluate the log-density of the Poisson target at the true solution
target_poisson.logpdf(x=x_true)
```
### Second notebook
#### Story 1, part 1
```python
# Overlay fixed-scale MH samples (after burn-in) on the target PDF
plot_pdf_2D(target_donut, -4, 4, -4, 4)
MH_fixed_samples.burnthin(1500).plot_pair(ax=plt.gca())
# Repeat with a much smaller proposal scale
MH_sampler.scale = 0.005
MH_fixed_samples_small_scale = MH_sampler.sample(Ns, Nb)
plt.figure()
plot_pdf_2D(target_donut, -4, 4, -4, 4)
MH_fixed_samples_small_scale.burnthin(1500).plot_pair(ax=plt.gca())
```
#### Story 1, part 2
```python
# ESS per second; the divisors are assumed to be the observed wall-clock
# sampling times (in seconds) of the two runs
print(MH_fixed_samples.compute_ess() / 1.4)
print(MH_adapted_samples.compute_ess() / 1.5)
```
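Recall that the effective sample size corrects for autocorrelation in the chain: for a chain of length $N$ with lag-$k$ autocorrelations $\rho_k$,
$$
\text{ESS} = \frac{N}{1 + 2\sum_{k=1}^{\infty} \rho_k},
$$
so ESS divided by sampling time (effectively independent samples per second) is a fair way to compare samplers with different per-iteration costs.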
#### Story 2
```python
# --- Function space: credible intervals from function-value samples
MH_samples_funvals = MH_samples.funvals
CWMH_samples_funvals = CWMH_samples.funvals
MH_samples_funvals.plot_ci(exact=x_true)
plt.figure()
CWMH_samples_funvals.plot_ci(exact=x_true)
# --- Coefficient space
plt.figure()
MH_samples.plot_ci(exact=x_true, plot_par=True)
plt.figure()
CWMH_samples.plot_ci(exact=x_true, plot_par=True)
# --- ESS per coordinate
plt.figure()
plt.plot(MH_samples.compute_ess(), label='MH')
plt.plot(CWMH_samples.compute_ess(), label='CWMH')
plt.legend()
# --- Scale: MH uses one global scale, CWMH one scale per coordinate
plt.figure()
plt.plot(MH_sampler.scale*np.ones(MH_sampler.dim), label='MH')
plt.plot(CWMH_sampler.scale, label='CWMH')
plt.legend()
```
#### Story 3: ULA
```python
# --- Change scale to 0.01
ULA_sampler_2 = ULA(target=target_donut, scale=0.01, x0=np.array([0, 0]))
Ns = 1000
ULA_samples_2 = ULA_sampler_2.sample(Ns)
plot_pdf_2D(target_donut, -4, 4, -4, 4)
ULA_samples_2.plot_pair(ax=plt.gca(), scatter_kwargs={'alpha': 1})
# --- Compare with MH for a short chain of 10 samples
ULA_samples_10 = ULA_sampler_2.sample(10)
MH_10 = MH(target_donut, scale=0.01, x0=np.array([0, 0]))
MH_samples_10 = MH_10.sample(10, 0)
plt.figure()
plot_pdf_2D(target_donut, -4, 4, -4, 4)
ULA_samples_10.plot_pair(ax=plt.gca(), scatter_kwargs={'alpha': 1})
plt.figure()
plot_pdf_2D(target_donut, -4, 4, -4, 4)
MH_samples_10.plot_pair(ax=plt.gca(), scatter_kwargs={'alpha': 1})
# --- Univariate target: compare a KDE of ULA samples with the true PDF
plt.figure()
x_uni = Gaussian(0, 1)
ULA_uni = ULA(target=x_uni, scale=1, x0=0)
ULA_samples_uni = ULA_uni.sample(40000)
kde_est = gaussian_kde(ULA_samples_uni.samples[0, :])
grid = np.linspace(-4, 4, 1000)
plt.plot(grid, kde_est(grid), label='ULA', color='b')
plot_pdf_1D(x_uni, -4, 4, color='r', label='True')
plt.legend()
```
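For reference, ULA simulates the Langevin diffusion with an Euler step and no accept/reject correction; in the standard parameterization with step size $\tau$ (how $\tau$ maps to the `scale` argument depends on CUQIpy's convention, assumed here),
$$
x_{k+1} = x_k + \tau\, \nabla \log \pi(x_k) + \sqrt{2\tau}\,\xi_k, \qquad \xi_k \sim \text{Gaussian}(0, I).
$$
Since nothing is ever rejected, even a 10-sample chain moves (unlike small-scale MH above), but the missing correction introduces a discretization bias that grows with the step size.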
#### Story 5: NUTS 1
```python
# ESS and trace plots for the donut distribution
NUTS_donuts_samples.compute_ess()
NUTS_donuts_samples.plot_trace()
# Draw connecting lines between consecutive MALA samples
sampler1 = MALA(target_donut, scale=0.01, x0=np.array([0, 0]))
sampler1_samples = sampler1.sample(1000, 500)
plot_pdf_2D(target_donut, -4, 4, -4, 4)
sampler1_samples.plot_pair(
    ax=plt.gca(),
    scatter_kwargs={'alpha': 1, 'c': 'r'})
plt.plot(
    sampler1_samples.samples[0, :],
    sampler1_samples.samples[1, :], 'r', alpha=0.5)
# Visualize the adapted step size and the tree size per iteration
plt.figure()
plt.plot(NUTS_donut.epsilon_list)
plt.figure()
plt.plot(NUTS_donut.num_tree_node_list)
```
#### Story 5: NUTS 2
```python
# NUTS for the Poisson problem
np.random.seed(0)  # fix the seed for reproducibility
# The Poisson target has no analytic gradient, so use finite differences
target_poisson.enable_FD()
NUTS_poisson = NUTS(target=target_poisson, max_depth=7)
Ns = 100
Nb = 10
NUTS_poisson_samples = NUTS_poisson.sample(Ns, Nb)
NUTS_poisson_samples.funvals.plot_ci(exact=x_true)
print(NUTS_poisson_samples.compute_ess())
```
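`enable_FD()` makes the target supply gradients by finite differences, which NUTS needs since the Poisson target has no analytic gradient; each partial derivative is approximated by a forward difference
$$
\frac{\partial \log \pi}{\partial x_i}(x) \approx \frac{\log \pi(x + h e_i) - \log \pi(x)}{h}
$$
for a small step $h$, at the cost of one extra target evaluation per dimension per gradient.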