Convex Optimization page 384

An example from Boyd & Vandenberghe, *Convex Optimization*, page 384 (maximum-likelihood estimation from linear measurements with additive noise).

The original equation:

$$\hat{x} = \left(\sum_i a_i a_i^\mathsf{T}\right)^{-1} \sum_i y_i a_i, \qquad \text{where } y_i = a_i^\mathsf{T} x + w_i$$

I❤️LA implementation:

given
a_i ∈ ℝ^n : the measurement vectors  
x ∈ ℝ^n   : original vector 
w_i ∈ ℝ   : measurement noise 

y_i = a_iᵀ x + w_i
x̂ = (∑_i a_i a_iᵀ)⁻¹ ∑_i y_i a_i

I❤️LA compiled to C++/Eigen:

/*
given
a_i ∈ ℝ^n : the measurement vectors  
x ∈ ℝ^n   : original vector 
w_i ∈ ℝ   : measurement noise 

y_i = a_iᵀ x + w_i
x̂ = (∑_i a_i a_iᵀ)⁻¹ ∑_i y_i a_i
*/
#include <cassert>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <set>
#include <vector>

#include <Eigen/Core>
#include <Eigen/Dense>
#include <Eigen/Sparse>

struct convex_optimization_384ResultType {
    Eigen::VectorXd y;
    Eigen::VectorXd x̂;
    convex_optimization_384ResultType(const Eigen::VectorXd & y,
               const Eigen::VectorXd & x̂)
    : y(y),
    x̂(x̂)
    {}
};

/**
 * convex_optimization_384
 *
 * Simulates noisy linear measurements y_i = a_iᵀ x + w_i and recovers the
 * least-squares estimate x̂ = (∑_i a_i a_iᵀ)⁻¹ ∑_i y_i a_i (Convex
 * Optimization, p. 384).
 *
 * @param a  the measurement vectors (non-empty; all of the same dimension)
 * @param x  original vector
 * @param w  measurement noise, one entry per measurement vector
 * @return y (the measurements) and x̂ (the estimate)
 */
convex_optimization_384ResultType convex_optimization_384(
    const std::vector<Eigen::VectorXd> & a,
    const Eigen::VectorXd & x,
    const std::vector<double> & w)
{
    // Check emptiness before touching a[0] — the original read a[0].rows()
    // first, which is undefined behavior for an empty input.
    assert( !a.empty() );
    const long dim_0 = static_cast<long>(w.size());
    const long n = a[0].rows();
    assert( static_cast<long>(a.size()) == dim_0 );
    for( const auto& el : a ) {
        assert( el.size() == n );
    }
    assert( x.size() == n );

    // y_i = a_iᵀ x + w_i
    Eigen::VectorXd y(dim_0);
    for( long i = 0; i < dim_0; i++ ){
        y[i] = a[i].dot(x) + w[i];
    }

    // Normal-equation matrix ∑_i a_i a_iᵀ
    Eigen::MatrixXd sum_0 = Eigen::MatrixXd::Zero(n, n);
    for( long i = 0; i < dim_0; i++ ){
        sum_0 += a[i] * a[i].transpose();
    }
    // Right-hand side ∑_i y_i a_i
    Eigen::VectorXd sum_1 = Eigen::VectorXd::Zero(n);
    for( long i = 0; i < dim_0; i++ ){
        sum_1 += y[i] * a[i];
    }
    // Solve (∑ a_i a_iᵀ) x̂ = ∑ y_i a_i instead of forming the inverse.
    Eigen::VectorXd x̂ = sum_0.colPivHouseholderQr().solve(sum_1);

    return convex_optimization_384ResultType(y, x̂);
}


/// Fills a, x, w with a random test instance for convex_optimization_384.
void generateRandomData(std::vector<Eigen::VectorXd> & a,
    Eigen::VectorXd & x,
    std::vector<double> & w)
{
    // rand()%10 could yield 0, producing an empty measurement set or
    // zero-dimensional vectors; convex_optimization_384 reads a[0], so
    // both dimensions must be at least 1.
    const int dim_0 = rand()%10 + 1;
    const int n = rand()%10 + 1;
    a.resize(dim_0);
    for(int i=0; i<dim_0; i++){
        a[i] = Eigen::VectorXd::Random(n);
    }
    x = Eigen::VectorXd::Random(n);
    w.resize(dim_0);
    for(int i=0; i<dim_0; i++){
        w[i] = rand() % 10;
    }
}


int main(int argc, char *argv[])
{
    srand((int)time(NULL));
    std::vector<Eigen::VectorXd> a;
    Eigen::VectorXd x;
    std::vector<double> w;
    generateRandomData(a, x, w);
    convex_optimization_384ResultType func_value = convex_optimization_384(a, x, w);
    std::cout<<"return value:\n"<<func_value.x̂<<std::endl;
    return 0;
}

I❤️LA compiled to Python/NumPy/SciPy:

"""
given
a_i ∈ ℝ^n : the measurement vectors  
x ∈ ℝ^n   : original vector 
w_i ∈ ℝ   : measurement noise 

y_i = a_iᵀ x + w_i
x̂ = (∑_i a_i a_iᵀ)⁻¹ ∑_i y_i a_i
"""
import numpy as np
import scipy
import scipy.linalg
from scipy import sparse
from scipy.integrate import quad
from scipy.optimize import minimize


class convex_optimization_384ResultType:
    """Container for the outputs of convex_optimization_384.

    Fields:
        y  -- simulated measurements, y_i = a_iᵀ x + w_i
        x̂ -- recovered estimate of the original vector x
    """

    def __init__(self, y, x̂):
        self.y, self.x̂ = y, x̂


def convex_optimization_384(a, x, w):
    """Simulate noisy measurements and recover a least-squares estimate of x.

    y_i = a_iᵀ x + w_i
    x̂  = (∑_i a_i a_iᵀ)⁻¹ ∑_i y_i a_i

    :param a: the measurement vectors, shape (dim_0, n)
    :param x: original vector, shape (n,)
    :param w: measurement noise, shape (dim_0,)
    :return: convex_optimization_384ResultType with fields y and x̂
    """
    a = np.asarray(a, dtype=np.float64)
    x = np.asarray(x, dtype=np.float64)
    w = np.asarray(w, dtype=np.float64)

    dim_0 = w.shape[0]
    n = a.shape[1]
    assert a.shape == (dim_0, n, )
    assert x.shape == (n,)
    assert w.shape == (dim_0,)

    # Simulated measurements: y_i = a_iᵀ x + w_i
    y = np.zeros(dim_0)
    for k in range(dim_0):
        y[k] = (a[k].T.reshape(1, n) @ x).item() + w[k]

    # Normal-equation matrix ∑ a_i a_iᵀ, accumulated term by term in the
    # same order as the generated code (keeps float results bit-identical).
    gram = np.zeros((n, n))
    for k in range(len(a)):
        gram += a[k].reshape(n, 1) @ a[k].T.reshape(1, n)

    # Right-hand side ∑ y_i a_i
    rhs = np.zeros((n, ))
    for k in range(len(a)):
        rhs += y[k] * a[k]

    # Solve the linear system instead of forming the inverse explicitly.
    x̂ = np.linalg.solve(gram, rhs)
    return convex_optimization_384ResultType(y, x̂)


def generateRandomData():
    """Generate a random, well-posed test instance (a, x, w).

    The original used np.random.randint(10), which can return 0 and thus
    produce an empty measurement set or zero-dimensional vectors; it also
    allowed dim_0 < n, in which case ∑ a_i a_iᵀ is singular and
    np.linalg.solve raises LinAlgError.  Draw n from [1, 10] and take at
    least n measurements so the normal matrix is (almost surely) invertible.
    """
    n = np.random.randint(1, 11)
    dim_0 = np.random.randint(n, n + 10)
    a = np.random.randn(dim_0, n)
    x = np.random.randn(n)
    w = np.random.randn(dim_0)
    return a, x, w


if __name__ == '__main__':
    # Build a random instance, run the solver, and show the estimate.
    a, x, w = generateRandomData()
    print("a:", a)
    print("x:", x)
    print("w:", w)
    result = convex_optimization_384(a, x, w)
    print("return value: ", result.x̂)

I❤️LA compiled to MATLAB:

function output = convex_optimization_384(a, x, w)
% output = convex_optimization_384(a, x, w)
%
%    Simulates noisy linear measurements and recovers a least-squares
%    estimate of x (Convex Optimization, p. 384).
%
%    given
%    a_i ∈ ℝ^n : the measurement vectors  
%    x ∈ ℝ^n   : original vector 
%    w_i ∈ ℝ   : measurement noise 
%    
%    y_i = a_iᵀ x + w_i
%    x̂ = (∑_i a_i a_iᵀ)⁻¹ ∑_i y_i a_i
    if nargin==0
        warning('generating random input data');
        [a, x, w] = generateRandomData();
    end
    % Random test instance; dimensions drawn from 1..10 (may be ill-posed
    % when dim_0 < n — the backslash solve then returns a least-norm-style
    % solution with a rank warning).
    function [a, x, w] = generateRandomData()
        dim_0 = randi(10);
        n = randi(10);
        a = randn(dim_0,n);
        x = randn(n,1);
        w = randn(dim_0,1);
    end

    % Force column-vector shape regardless of how callers pass x and w.
    x = reshape(x,[],1);
    w = reshape(w,[],1);

    dim_0 = size(w, 1);
    n = size(a, 2);
    assert( isequal(size(a), [dim_0, n]) );
    assert( numel(x) == n );
    assert( size(w,1) == dim_0 );

    % y_i = a_iᵀ x + w_i.  Row i of a already holds a_iᵀ; the generated ''
    % is a double transpose, effectively a no-op on the numeric row vector.
    y = zeros(dim_0,1);
    for i = 1:dim_0
        y(i) = a(i,:)'' * x + w(i);
    end
    % Normal-equation matrix ∑_i a_i a_iᵀ (column times row, n×n).
    sum_0 = zeros(n, n);
    for i = 1:size(a, 1)
        sum_0 = sum_0 + reshape(a(i,:)', [n, 1]) * a(i,:)'';
    end
    % Right-hand side ∑_i y_i a_i.
    sum_1 = zeros(n,1);
    for i = 1:size(a, 1)
        sum_1 = sum_1 + y(i) * a(i,:)';
    end
    % Estimate solved via backslash rather than forming the inverse.
    x_hat = ((sum_0)\sum_1);
    output.y = y;

    output.x_hat = x_hat;
end

I❤️LA compiled to LaTeX:

% Typeset rendering of the I❤️LA program (emitted by the LaTeX backend).
\documentclass[12pt]{article}
\usepackage{mathdots}
\usepackage[bb=boondox]{mathalfa}
\usepackage{mathtools}
\usepackage{amssymb}
\usepackage{libertine}
\DeclareMathOperator*{\argmax}{arg\,max}
\DeclareMathOperator*{\argmin}{arg\,min}
% Narrow page sized for a standalone equation card.
\usepackage[paperheight=8in,paperwidth=4in,margin=.3in,heightrounded]{geometry}
% Wrap \left/\right in \mathopen/\mathclose groups so the delimiters get
% correct spacing when the sizes are stretched.
\let\originalleft\left
\let\originalright\right
\renewcommand{\left}{\mathopen{}\mathclose\bgroup\originalleft}
\renewcommand{\right}{\aftergroup\egroup\originalright}
\begin{document}

\begin{center}
% Scale the whole derivation to the text width.
\resizebox{\textwidth}{!} 
{
\begin{minipage}[c]{\textwidth}
\begin{align*}
\intertext{given} 
\mathit{a}_{\mathit{i}} & \in \mathbb{R}^{ \mathit{n}} \text{ the measurement vectors  } \\
\mathit{x} & \in \mathbb{R}^{ \mathit{n}} \text{ original vector } \\
\mathit{w}_{\mathit{i}} & \in \mathbb{R} \text{ measurement noise } \\
\\
\mathit{y}_{ \mathit{i} } & = {\mathit{a}_{ \mathit{i} }}^T\mathit{x} + \mathit{w}_{ \mathit{i} } \\
\textit{x̂} & = \left( \sum_\mathit{i} \mathit{a}_{ \mathit{i} }{\mathit{a}_{ \mathit{i} }}^T \right)^{-1}\sum_\mathit{i} \mathit{y}_{ \mathit{i} }\mathit{a}_{ \mathit{i} } \\
\end{align*}
\end{minipage}
}
\end{center}

\end{document}

I❤️LA LaTeX output: