% Demo script: fit a piecewise-constant function to noisy samples using a
% smoothness (derivative) penalty, then plot the fit and its derivative.
foo = 1;
% First setup some training data...
% NOTE(review): make_data is a project helper; presumably the arguments are
% (noise level, something, something) -- confirm against its definition.
[train_in train_out test_in true_out] = make_data(0.4, 2, 0.1);
% Plot it...
clf
% Training points as red circles; true function values over the test inputs.
plot(train_in, train_out, 'ro', test_in, true_out)
% Try to approximate it using a smoothness penalty.
approxsize = 50;      % number of bins in the piecewise-constant approximation
target = train_out;   % regression targets
inputs = train_in;    % corresponding input locations
% The number of targets
tsize = size(target,1);
% WEIGHT is a matrix which relates our predicted function, Y, to the TARGET
% vector. It is almost all zeros with one value of 1 in each row.
weight = zeros(tsize,approxsize);
% Set the appropriate weights to 1.
% Warning: this relies on a trick: the function is piecewise constant with
% a different value for each integer. By taking the floor of the INPUTS we
% can compute which bin we fall into.
in_index = floor(inputs);
% Use linear indexing so EVERY row gets its 1, even when two inputs share a
% bin. (The previous form, weight(:, in_index) = eye(tsize), has duplicate
% column indices when bins repeat; MATLAB keeps only the last write, so all
% earlier rows mapped to that bin were left all-zero.)
weight(sub2ind([tsize approxsize], (1:tsize)', in_index)) = 1;
% A simple prior is to penalize based on the gradient.
deriv = make_deriv_matrix(approxsize,2); % Second deriv...
% Compute the optimal function.
eta = 10;   % regularization strength: larger => smoother estimate
% Solve the regularized normal equations with mldivide (backslash) rather
% than forming an explicit inverse: it is faster and numerically more stable.
yhat = (weight'*weight + eta * deriv'*deriv) \ (weight' * target);
% Plotting: render the fitted bin values as a step function.
bins = (1:approxsize)';
[step_x, step_y] = histgraph(yhat', bins');
% Figure 1: overlay the fitted step function (thick blue line), the training
% points (red circles), and the true function (green).
figure(1);
clf;
hold on;
plot(step_y, step_x, 'LineWidth', 3);
plot(train_in, train_out, 'ro', test_in, true_out, 'g', 'LineWidth', 3, 'MarkerSize', 10);
axis tight;
hold off;
% Figure 2: visualize derivatives of the fitted function as step plots.
figure(2);
clf;
hold on;
for order = 4:4   % currently only the 4th derivative is drawn
    d = make_deriv_matrix(approxsize, order) * yhat;
    [dx, dy] = histgraph(d', bins');
    plot(dy, dx);
end