
from UncertainSCI.distributions import NormalDistribution
from UncertainSCI.model_examples import sine_modulation
-from UncertainSCI.indexing import TotalDegreeSet
from UncertainSCI.pce import PolynomialChaosExpansion
-# # Distribution setup

+from UncertainSCI.vis import piechart_sensitivity, quantile_plot, mean_stdev_plot
+
+## Distribution setup: generating a correlated multivariate normal distribution
# Number of parameters
dimension = 2

-# Specifies normal distribution
+# Specifies statistics of normal distribution
mean = np.ones(dimension)
cov = np.random.randn(dimension, dimension)
cov = np.matmul(cov.T, cov)/(4*dimension) # Some random covariance matrix

# Normalize so that parameters with larger index have smaller importance
D = np.diag(1/np.sqrt(np.diag(cov)) * 1/(np.arange(1, dimension+1)**2))
cov = D @ cov @ D
-
dist = NormalDistribution(mean=mean, cov=cov, dim=dimension)

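The rescaling above pins down the marginal scales: after `cov = D @ cov @ D`, parameter i (1-based) has variance 1/i**4, so later parameters contribute progressively less. A minimal check of that claim, as a standalone sketch rather than part of the demo script:

```python
import numpy as np

dimension = 2
cov = np.random.randn(dimension, dimension)
cov = np.matmul(cov.T, cov)/(4*dimension)  # random SPD covariance, as above
D = np.diag(1/np.sqrt(np.diag(cov)) * 1/(np.arange(1, dimension+1)**2))
cov = D @ cov @ D

# The diagonal is exactly 1/i**4 for i = 1, ..., dimension
print(np.allclose(np.diag(cov), 1.0/np.arange(1, dimension+1)**4))  # True
```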
-# # Indices setup
-order = 10
-index_set = TotalDegreeSet(dim=dimension, order=order)
-
-print('This will query the model {0:d} times'.format(index_set.get_indices().shape[0] + 10))
-# Why +10? That's the default for PolynomialChaosExpansion.build_pce_wafp
-
-# # Initializes a pce object
-pce = PolynomialChaosExpansion(index_set, dist)
-
-# # Define model
+## Define forward model
N = int(1e2) # Number of degrees of freedom of model
left = -1.
right = 1.
x = np.linspace(left, right, N)
model = sine_modulation(N=N)

-# # Three equivalent ways to run the PCE model:
+## Polynomial order
+order = 5
+
+## Initializes a pce object
+pce = PolynomialChaosExpansion(distribution=dist, order=order, plabels=['p1', 'p2'])
+pce.generate_samples()

-# 1
-# Generate samples and query model in one call:
-pce = PolynomialChaosExpansion(index_set, dist)
-lsq_residuals = pce.build(model, oversampling=10)
+print('This queries the model {0:d} times'.format(pce.samples.shape[0]))

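For context on the printed count: with `dimension = 2` and `order = 5`, a total-degree index set (what the removed code constructed explicitly, and presumably what the new constructor builds by default) has C(5+2, 2) = 21 multi-indices, and the removed comment notes a default oversampling of 10 extra samples. A back-of-the-envelope sketch of that count, under those assumptions:

```python
from math import comb

dimension, order = 2, 5
# Size of a total-degree index set: all multi-indices with |alpha| <= order
n_coefficients = comb(order + dimension, dimension)  # 21
n_queries = n_coefficients + 10                      # with the +10 oversampling noted above
print(n_coefficients, n_queries)                     # 21 31
```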
+# Query model:
+pce.build(model)

# The parameter samples and model evaluations are accessible:
parameter_samples = pce.samples
model_evaluations = pce.model_output

-# And you could build a second PCE on the same parameter samples
-pce2 = PolynomialChaosExpansion(index_set, dist)
+# And if desired you could build a second PCE on the same parameter samples
+pce2 = PolynomialChaosExpansion(distribution=dist, order=order)
pce2.set_samples(parameter_samples)
pce2.build(model)

-# pce and pce2 have the same coefficients:
-# np.linalg.norm( pce.coefficients - pce2.coefficients )
-
-# # Postprocess PCE: mean, stdev, sensitivities, quantiles
-mean = pce.mean()
-stdev = pce.stdev()
-
-
-# Power set of [0, 1, ..., dimension-1]
-variable_interactions = list(chain.from_iterable(combinations(range(dimension), r) for r in range(1, dimension+1)))
-
-# "Total sensitivity" is a non-partitive relative sensitivity measure per parameter.
-total_sensitivity = pce.total_sensitivity()
-
-# "Global sensitivity" is a partitive relative sensitivity measure per set of parameters.
-global_sensitivity = pce.global_sensitivity(variable_interactions)
-
-
-
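To make the sensitivity bookkeeping above concrete: for `dimension = 2` the power-set construction yields the interaction sets (0,), (1,), and (0, 1). In the usual Sobol convention, which the comments above appear to follow, the "global" values over these three sets partition the variance (they sum to 1 at each model degree of freedom), while the "total" sensitivity of a parameter aggregates every set containing it, so totals overlap and need not sum to 1. A small sketch of just the index bookkeeping:

```python
from itertools import chain, combinations

dimension = 2
# Power set (minus the empty set) of parameter indices, as in the code above
variable_interactions = list(chain.from_iterable(
    combinations(range(dimension), r) for r in range(1, dimension + 1)))
print(variable_interactions)  # [(0,), (1,), (0, 1)]

# Sets involving parameter 0 -- in the Sobol convention, the total sensitivity
# of parameter 0 aggregates exactly these contributions
print([s for s in variable_interactions if 0 in s])  # [(0,), (0, 1)]
```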
-Q = 4 # Number of quantile bands to plot
-dq = 0.5/(Q+1)
-q_lower = np.arange(dq, 0.5-1e-7, dq)[::-1]
-q_upper = np.arange(0.5 + dq, 1.0-1e-7, dq)
-quantile_levels = np.append(np.concatenate((q_lower, q_upper)), 0.5)
-
-quantiles = pce.quantile(quantile_levels, M=int(2e3))
-median = quantiles[-1, :]
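Worked out for Q = 4: dq = 0.5/(Q+1) = 0.1, so q_lower = [0.4, 0.3, 0.2, 0.1], q_upper = [0.6, 0.7, 0.8, 0.9], and quantile_levels ends with the appended median level 0.5, which is why the median is read off at index -1. A standalone recomputation:

```python
import numpy as np

Q = 4
dq = 0.5/(Q+1)                               # 0.1
q_lower = np.arange(dq, 0.5-1e-7, dq)[::-1]  # ~[0.4, 0.3, 0.2, 0.1]
q_upper = np.arange(0.5 + dq, 1.0-1e-7, dq)  # ~[0.6, 0.7, 0.8, 0.9]
quantile_levels = np.append(np.concatenate((q_lower, q_upper)), 0.5)
print(quantile_levels)                       # ~[0.4 0.3 0.2 0.1 0.6 0.7 0.8 0.9 0.5]
```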
-
-# # For comparison: Monte Carlo statistics
-M = 1000 # Generate MC samples
-p_phys = dist.MC_samples(M)
-output = np.zeros([M, N])
-
-for j in range(M):
-    output[j, :] = model(p_phys[j, :])
-
-MC_mean = np.mean(output, axis=0)
-MC_stdev = np.std(output, axis=0)
-MC_quantiles = np.quantile(output, quantile_levels, axis=0)
-MC_median = quantiles[-1, :]
-
-# # Visualization
-V = 50 # Number of MC samples to visualize
-
-# mean +/- stdev plot
-plt.plot(x, output[:V, :].T, 'k', alpha=0.8, linewidth=0.2)
-plt.plot(x, mean, 'b', label='PCE mean')
-plt.fill_between(x, mean-stdev, mean+stdev, interpolate=True, facecolor='red', alpha=0.5, label='PCE 1 stdev range')
-
-plt.plot(x, MC_mean, 'b:', label='MC mean')
-plt.plot(x, MC_mean+MC_stdev, 'r:', label='MC mean $\\pm$ stdev')
-plt.plot(x, MC_mean-MC_stdev, 'r:')
-
-plt.xlabel('x')
-plt.title('Mean $\\pm$ standard deviation')
-
-plt.legend(loc='lower right')
-
-# quantile plot
-plt.figure()
-
-plt.subplot(121)
-plt.plot(x, output[:V, :].T, 'k', alpha=0.8, linewidth=0.2)
-plt.plot(x, median, 'b', label='PCE median')
-
-band_mass = 1/(2*(Q+1))
-
-for ind in range(Q):
-    alpha = (Q-ind) * 1/Q - (1/(2*Q))
-    if ind == 0:
-        plt.fill_between(x, quantiles[ind, :], quantiles[Q+ind, :], interpolate=True, facecolor='red',
-                         alpha=alpha, label='{0:1.2f} probability mass (each band)'.format(band_mass))
-    else:
-        plt.fill_between(x, quantiles[ind, :], quantiles[Q+ind, :], interpolate=True, facecolor='red',
-                         alpha=alpha)
-
-plt.title('PCE Median + quantile bands')
-plt.xlabel('x')
-plt.legend(loc='lower right')
-
-plt.subplot(122)
-plt.plot(x, output[:V, :].T, 'k', alpha=0.8, linewidth=0.2)
-plt.plot(x, MC_median, 'b', label='MC median')
-
-for ind in range(Q):
-    alpha = (Q-ind) * 1/Q - (1/(2*Q))
-    if ind == 0:
-        plt.fill_between(x, MC_quantiles[ind, :], MC_quantiles[Q+ind, :], interpolate=True, facecolor='red',
-                         alpha=alpha, label='{0:1.2f} probability mass (each band)'.format(band_mass))
-    else:
-        plt.fill_between(x, MC_quantiles[ind, :], MC_quantiles[Q+ind, :], interpolate=True, facecolor='red', alpha=alpha)
-
-plt.title('MC Median + quantile bands')
-plt.xlabel('x')
-plt.legend(loc='lower right')
-
-# Sensitivity pie chart, averaged over all model degrees of freedom
-average_global_SI = np.sum(global_sensitivity, axis=1)/N
-
-labels = ['[' + ' '.join(str(elem) for elem in [i+1 for i in item]) + ']' for item in variable_interactions]
-_, ax = plt.subplots()
-ax.pie(average_global_SI*100, labels=labels, autopct='%1.1f%%',
-       startangle=90)
-ax.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
-plt.title('Sensitivity due to variable interactions')
-
-
-
-
-
-sensitivities = [total_sensitivity, global_sensitivity]
-
-sensbounds = [[0, 1], [0, 1]]
-
-senslabels = ['Total sensitivity', 'Global sensitivity']
-dimlabels = ['Dimension 1', 'Dimension 2', 'Interaction']
-
-fig, ax = plt.subplots(2, 3)
-for row in range(2):
-    for col in range(2):
-        ax[row][col].plot(x, sensitivities[row][col,:])
-        ax[row][col].set_ylim(sensbounds[row])
-        if row==2:
-            ax[row][col].set(xlabel='$x$')
-        if col==0:
-            ax[row][col].set(ylabel=senslabels[row])
-        if row==0:
-            ax[row][col].set_title(dimlabels[col])
-        if row<2:
-            ax[row][col].xaxis.set_ticks([])
-        if col>0:
-            ax[row][col].yaxis.set_ticks([])
-
-ax[1][2].plot(x, sensitivities[1][2,:])
-ax[1][2].set_ylim(sensbounds[1])
-
+## Visualization
+mean_stdev_plot(pce, ensemble=50)
+quantile_plot(pce, bands=3, xvals=x, xlabel='$x$')
+piechart_sensitivity(pce)

plt.show()