% ml-infotheory.tex
% basic info theory
\newcommand{\entx}{- \sum_{x \in \Xspace} p(x) \cdot \log p(x)} % entropy of x
\newcommand{\dentx}{- \int_{\Xspace} f(x) \cdot \log f(x) dx} % diff entropy of x
\newcommand{\jentxy}{- \sum_{x \in \Xspace} \sum_{y \in \Yspace} p(x, y) \cdot \log p(x, y)} % joint entropy of x, y
\newcommand{\jdentxy}{- \int_{\Xspace, \Yspace} f(x, y) \cdot \log f(x, y) dx dy} % joint diff entropy of x, y
\newcommand{\centyx}{- \sum_{x \in \Xspace} p(x) \sum_{y \in \Yspace} p(y|x) \cdot \log p(y|x)} % cond entropy y|x
\newcommand{\cdentyx}{- \int_{\Xspace, \Yspace} f(x, y) \cdot \log f(y | x) dx dy} % cond diff entropy y|x
\newcommand{\xentpq}{- \sum_{x \in \Xspace} p(x) \cdot \log q(x)} % cross-entropy of p, q
\newcommand{\kldpq}{D_{KL}(p \| q)} % KLD between p and q
\newcommand{\kldpqt}{D_{KL}(p \| q_{\thetav})} % KLD between p and a parameterized q
\newcommand{\explogpq}{\E_p \left[\log \frac{p(X)}{q(X)} \right]} % expected LLR of p, q (def KLD)
\newcommand{\sumlogpq}{\sum_{x \in \Xspace} p(x) \cdot \log \frac{p(x)}{q(x)}} % KLD of p, q written as a sum (discrete case)
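% Usage sketch (assumption: \Xspace, \Yspace, \E, and \thetav are defined in the
% repository's other macro files). In a document that inputs this file, the macros
% expand to the usual identities, e.g.
%   \[ H(X) = \entx, \qquad H(Y \mid X) = \centyx, \]
%   \[ H(p, q) = \xentpq, \qquad \kldpq = \explogpq = \sumlogpq. \]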