function bnet =  mk_dbn(intra, inter, node_sizes_slice, discrete_nodes_slice, ...
                        equiv_class, observed_nodes_slice, algo, clusters, intra1)
% MK_DBN Make a Dynamic Bayesian Network.
%
% BNET = MK_DBN(INTRA, INTER, NODE_SIZES) makes a DBN with intra-slice adjacency matrix
% INTRA and inter-slice adjacency matrix INTER. NODE_SIZES specifies the arity of each
% node in the slice.
%
% BNET = MK_DBN(INTRA, INTER, NODE_SIZES, DISCRETE_NODES) specifies which nodes are discrete;
% the rest are assumed continuous. (Default: all are discrete.)
%
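% For example, a minimal sketch (the matrices and sizes here are purely illustrative):
%   intra = [0 1; 0 0];    % within a slice, node 1 -> node 2
%   inter = [1 0; 0 0];    % between slices, node 1 in slice t -> node 1 in slice t+1
%   ns    = [2 2];         % both nodes are binary
%   bnet  = mk_dbn(intra, inter, ns);          % all nodes discrete (the default)
%   bnet  = mk_dbn(intra, inter, ns, [1 2]);   % same thing, with the discrete nodes made explicit
%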
% BNET = MK_DBN(INTRA, INTER, NODE_SIZES, DISCRETE_NODES, EQUIV_CLASS) will create the
% specified equivalence classes. EQUIV_CLASS(i) = j means node i gets its params from bnodes{j}.
% The default is that each node in the first two slices gets its own equivalence class, and
% subsequent slices have their parameters tied to the second slice. Note that this is not the
% standard assumption made in HMMs and LDS. See mk_equiv_classes_for_dbn for details.
%
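% For example, in the 2-node model above, with node 1 hidden and node 2 observed, the standard
% HMM parameter tying (observation parameters shared across all slices) could be written as
% follows (an illustrative choice, not the default):
%   equiv_class = [1 2 3 2];   % slice 1: node 1 -> class 1 (prior), node 2 -> class 2 (obs);
%                              % slice 2: node 1 -> class 3 (transition), node 2 -> class 2 (obs, tied)
%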
% BNET = MK_DBN(INTRA, INTER, NODE_SIZES, DISCRETE_NODES, EQUIV_CLASS, OBS_NODES)
% specifies which nodes will be observed in each slice. (This must be specified if using algo =
% 'hmm' or 'lds' (see below), so we can check if the model is in canonical form.) Otherwise, we
% must specify which nodes are observed when calling ENTER_EVIDENCE.
%
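% For example, to use the HMM engine on the 2-node model above, node 2 must be declared
% observed up front (an illustrative sketch):
%   bnet = mk_dbn(intra, inter, ns, [1 2], [1 2 3 2], 2, 'hmm');
%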
% BNET = MK_DBN(INTRA, INTER, NODE_SIZES, DISCRETE_NODES, EQUIV_CLASS, OBS_NODES, ALGO)
% specifies which inference algorithm to use. The choices are:
%  jtree - junction tree algorithm applied to the unrolled (static) network
%  bk - the Boyen-Koller algorithm, which applies jtree to two slices at a time (see below)
%  hmm - the forwards-backwards algorithm for Hidden Markov Models (discrete hidden state space)
%  lds - the forwards-backwards algorithm for Linear Dynamical Systems (Gaussian hidden state space)
%  lw - likelihood weighting applied to the unrolled (static) network
%  sof - survival of the fittest / particle filtering / condensation (like lw, but resamples at each slice)
%  none - doesn't create any auxiliary data structures, so the DBN can be used just for, e.g., sampling
% The default is 'bk', which uses the Boyen-Koller approximation algorithm.
% For details, see "Tractable Inference for Complex Stochastic Processes", Boyen and Koller, UAI 1998,
% and "Approximate Learning of Dynamic Models", Boyen and Koller, NIPS 1998.
% To use this algo, you need to specify how to partition the hidden persistent nodes into
% clusters, as follows.
% BNET = MK_DBN(INTRA, INTER, NODE_SIZES, DISCRETE_NODES, EQUIV_CLASS, OBS_NODES, 'bk', 'exact')
%  will create one cluster containing the whole slice, which is equivalent to exact inference.
% BNET = MK_DBN(INTRA, INTER, NODE_SIZES, DISCRETE_NODES, EQUIV_CLASS, OBS_NODES, 'bk', 'ff')
%  will create one cluster per node. ('ff' stands for 'fully factorized'.)
% BNET = MK_DBN(INTRA, INTER, NODE_SIZES, DISCRETE_NODES, EQUIV_CLASS, OBS_NODES, 'bk', CLUSTERS)
%  will use the specified set of clusters e.g. CLUSTERS = { 1:2, 3:6, 7:8 }.
% Note that BK uses the junction tree algorithm on a 2-slice network.
%
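% For example, for a hypothetical 6-node slice in which nodes 1:4 are hidden and persistent and
% nodes 5:6 are observed, one possible partition into two clusters is:
%   eclass = [1:6 (1:6)+6];    % the default parameter tying, spelled out explicitly
%   bnet = mk_dbn(intra, inter, ns, 1:6, eclass, [5 6], 'bk', {1:2, 3:4});
%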
% If ALGO = 'hmm', we use the forwards-backwards algorithm. This is only possible if the DBN
% is in canonical form (see CANONICAL_DBN for a definition) and the hidden nodes are discrete.
% Note that, since this algorithm is much simpler than the above general purpose algorithms,
% it has lower constant factors, and usually runs faster for small networks. However, the
% size of the transition matrix is O(2^(2n)), where n is the number of hidden nodes per
% slice (assumed binary for simplicity). As a rule of thumb, the HMM code is faster if n < 10.
% (This requires the HMM toolbox.)
%
% If ALGO = 'lds' (Linear Dynamical System), we use the Kalman-RTS algorithm. This is the
% analog of the forwards-backwards algorithm in the case that all the variables are Gaussian.
% The transition matrix has size O(n^2), and hence it is possible to convert very large models
% to LDS form. However, junction tree is not too slow in this case either.
% (This requires the Kalman filter toolbox.)
%
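% For example, a scalar linear-Gaussian state-space model with hidden node 1 and observed node 2
% might be set up as follows (an illustrative sketch; no nodes are discrete):
%   intra = [0 1; 0 0]; inter = [1 0; 0 0]; ns = [1 1];
%   bnet = mk_dbn(intra, inter, ns, [], [1 2 3 2], 2, 'lds');
%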
% We support the following sampling-based filtering algorithms. (Smoothing is not supported.)
% If ALGO = 'lw', we use likelihood weighting.
% If ALGO = 'sof', we use 'Survival of the Fittest' (aka particle filtering / condensation).
% For details, see D. Koller and R. Fratkina,
%   "Using Learning for Approximation in Stochastic Processes", ICML 1998.
%
% If ALGO = 'none', we don't create any auxiliary data structures. This can be useful if we
% are interested in using the DBN merely as a specification of a sparse model, e.g. so that we
% can sample from it.
%
% BNET =  MK_DBN(INTRA, INTER, NODE_SIZES_SLICE, DISCRETE_NODES_SLICE, ...
%               EQUIV_CLASS, OBSERVED_NODES_SLICE, ALGO, CLUSTERS, INTRA1)
% uses the specified intra-slice connectivity matrix for the first slice. Default: INTRA1 = INTRA.
%
% After calling mk_dbn, call BNET = MK_XXX_NODE(BNET, I) for each equivalence class I,
% and then call BNET = COMPILE_PARAMS(BNET).
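%
% For example, a possible end-to-end sketch (mk_tabular_node below is just a stand-in for the
% MK_XXX_NODE pattern; use whichever node constructor matches the CPD type of each class):
%   bnet = mk_dbn(intra, inter, ns, [1 2], [1 2 3 2], 2, 'hmm');
%   for i=1:max(bnet.equiv_class(:))
%     bnet = mk_tabular_node(bnet, i);
%   end
%   bnet = compile_params(bnet);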


assert(acyclic(intra,1));

ss = length(intra); % slice size
bnet.slice_size = ss;

if ~exist('discrete_nodes_slice', 'var'), discrete_nodes_slice = 1:ss; end
if ~exist('equiv_class', 'var'), equiv_class = [1:ss (1:ss)+ss]; end
if ~exist('observed_nodes_slice', 'var'), observed_nodes_slice = []; end
if ~exist('algo', 'var'), algo = 'bk'; end
if ~exist('clusters', 'var'), clusters = 'exact'; end
if ~exist('intra1', 'var'), intra1 = intra; end

bnet.name = [];
bnet.algo = algo;
bnet.is_dbn = 1;
bnet.inter = inter;
bnet.intra = intra;
bnet.intra1 = intra1;
% We make the slice-specific fields row vectors so that they print out nicely.
bnet.node_sizes_slice = node_sizes_slice(:)';
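% blocks_slice records the block size of each node: a continuous node's block equals its size,
% while discrete nodes get block size 1 (set just below).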
bnet.blocks_slice = node_sizes_slice(:)';
bnet.blocks_slice(discrete_nodes_slice) = 1;
bnet.discrete_slice = discrete_nodes_slice(:)';
bnet.cts_slice = setdiff(1:ss, bnet.discrete_slice);
bnet.equiv_class_slice1 = equiv_class(1:ss);
bnet.equiv_class_slice2 = equiv_class([1:ss]+ss);
bnet.equiv_class = equiv_class;
bnet.observed_slice = observed_nodes_slice(:)';
bnet.hidden_slice = setdiff(1:ss, bnet.observed_slice);
bnet = unroll_dbn(bnet, 2); % make a 2 slice network
bnet.num_slices = 0; % since we don't know the true length yet 
num_equiv_classes = max(bnet.equiv_class(:));
bnet.bnodes = cell(1, num_equiv_classes);
bnet.desired_marginal = [];
bnet.maximize = 0;
bnet.uedges = [];
bnet.cliques = [];
bnet.filter_only = 0;
bnet.useC = 0;
bnet.wait_till_evidence = 0;

switch algo
  case {'hmm', 'lds'}
    assert(canonical_dbn(intra, inter, observed_nodes_slice));
    % No join tree is needed. However, we need to make space to store the posteriors.
    % We don't know how much space is needed until we know how long the sequence is (in enter_evidence).
    bnet.hmm.one_slice_marginal = [];
    bnet.hmm.two_slice_marginal = [];
  case 'jtree',
    bnet.num_slices = 2;
    bnet.jtree = mk_jtree(bnet);
    bnet.num_slices = 0;
    % we can't make the join tree until we know how long the sequence is (in enter_evidence).
  case 'bk',
    pnodes = intersect(persistent_nodes(bnet.inter), bnet.hidden_slice);
    if strcmp(clusters, 'exact')
      %clusters = {pnodes};
      clusters = {1:ss};
    elseif strcmp(clusters, 'ff')
      %clusters = num2cell(pnodes, 1);
      clusters = num2cell(1:ss, 1);
    end
    bnet.dbn.cluster = clusters;
    bnet = mk_jtree_dbn(bnet);
  case {'ls', 'lw'}
    bnet.sample.num_samples = 50;
  case {'sof'},
    bnet.sof.num_samples = 50;
    bnet.sof.active = 1;
    bnet.one_slice_bnet = unroll_dbn(bnet, 1);
  case 'none',
    % 
  otherwise,
    error(['unrecognized inference algorithm ' algo]);
end

