An example from Convex Optimization, page 154
The original equation:
I❤️LA implementation:
given
f ∈ ℝ^(n)
p ∈ ℝ^(n)
∑_i f_i²p_i - (∑_i f_i p_i)²
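For context (an added note, not part of the I❤️LA output): if p is a probability vector (p_i ≥ 0 and ∑_i p_i = 1), the expression is the variance E[X²] − (E[X])² of a random variable X that takes the value f_i with probability p_i. The I❤️LA specification only requires p ∈ ℝ^(n), so this probabilistic reading is an assumption about the input. As a LaTeX identity:
% Variance identity, assuming p_i >= 0 and sum_i p_i = 1.
\[
\operatorname{Var}(X)
  = \mathbf{E}\!\left[X^{2}\right] - \left(\mathbf{E}[X]\right)^{2}
  = \sum_i f_i^{2}\, p_i - \left( \sum_i f_i\, p_i \right)^{2}
\]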
I❤️LA compiled to C++/Eigen:
/*
given
f ∈ ℝ^(n)
p ∈ ℝ^(n)
∑_i f_i²p_i - (∑_i f_i p_i)²
*/
#include <Eigen/Core>
#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <cassert>
#include <cmath>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <set>

struct convex_optimization_154ResultType {
    double ret;
    convex_optimization_154ResultType(const double & ret)
        : ret(ret)
    {}
};

convex_optimization_154ResultType convex_optimization_154(
    const Eigen::VectorXd & f,
    const Eigen::VectorXd & p)
{
    const long n = f.size();
    assert( p.size() == n );

    double sum_0 = 0;
    for(int i=1; i<=p.size(); i++){
        sum_0 += std::pow(f[i-1], 2) * p[i-1];
    }
    double sum_1 = 0;
    for(int i=1; i<=p.size(); i++){
        sum_1 += f[i-1] * p[i-1];
    }
    double ret = sum_0 - std::pow(sum_1, 2);
    return convex_optimization_154ResultType(ret);
}

void generateRandomData(Eigen::VectorXd & f,
    Eigen::VectorXd & p)
{
    const int n = rand()%10;
    f = Eigen::VectorXd::Random(n);
    p = Eigen::VectorXd::Random(n);
}

int main(int argc, char *argv[])
{
    srand((int)time(NULL));
    Eigen::VectorXd f;
    Eigen::VectorXd p;
    generateRandomData(f, p);
    convex_optimization_154ResultType func_value = convex_optimization_154(f, p);
    std::cout<<"return value:\n"<<func_value.ret<<std::endl;
    return 0;
}
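Eigen is header-only, so once its include path is known the generated C++ should build with an ordinary compiler invocation, e.g. something like g++ -std=c++11 -I /path/to/eigen convex_optimization_154.cpp (the file name and include path here are placeholders, not part of the generated output).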
I❤️LA compiled to Python/NumPy/SciPy:
"""
given
f ∈ ℝ^(n)
p ∈ ℝ^(n)
∑_i f_i²p_i - (∑_i f_i p_i)²
"""
import numpy as np
import scipy
import scipy.linalg
from scipy import sparse
from scipy.integrate import quad
from scipy.optimize import minimize
class convex_optimization_154ResultType:
    def __init__(self, ret):
        self.ret = ret

def convex_optimization_154(f, p):
    f = np.asarray(f, dtype=np.float64)
    p = np.asarray(p, dtype=np.float64)

    n = f.shape[0]
    assert f.shape == (n,)
    assert p.shape == (n,)

    sum_0 = 0
    for i in range(1, len(p)+1):
        sum_0 += np.power(f[i-1], 2) * p[i-1]
    sum_1 = 0
    for i in range(1, len(p)+1):
        sum_1 += f[i-1] * p[i-1]
    ret = sum_0 - np.power(sum_1, 2)
    return convex_optimization_154ResultType(ret)

def generateRandomData():
    n = np.random.randint(10)
    f = np.random.randn(n)
    p = np.random.randn(n)
    return f, p

if __name__ == '__main__':
    f, p = generateRandomData()
    print("f:", f)
    print("p:", p)
    func_value = convex_optimization_154(f, p)
    print("return value: ", func_value.ret)
I❤️LA compiled to MATLAB:
function output = convex_optimization_154(f, p)
% output = convex_optimization_154(f, p)
%
% given
% f ∈ ℝ^(n)
% p ∈ ℝ^(n)
%
% ∑_i f_i²p_i - (∑_i f_i p_i)²
    if nargin==0
        warning('generating random input data');
        [f, p] = generateRandomData();
    end
    function [f, p] = generateRandomData()
        n = randi(10);
        f = randn(n,1);
        p = randn(n,1);
    end

    f = reshape(f,[],1);
    p = reshape(p,[],1);

    n = size(f, 1);
    assert( numel(f) == n );
    assert( numel(p) == n );

    sum_0 = 0;
    for i = 1:size(p,1)
        sum_0 = sum_0 + f(i).^2 * p(i);
    end
    sum_1 = 0;
    for i = 1:size(p,1)
        sum_1 = sum_1 + f(i) * p(i);
    end
    ret = sum_0 - (sum_1).^2;
    output.ret = ret;
end
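In MATLAB or Octave, the generated function returns a struct, so the value is read from the ret field (for example, out = convex_optimization_154(f, p) followed by out.ret); calling it with no arguments takes the generateRandomData branch above and warns that random input is being generated.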
I❤️LA compiled to LaTeX:
\documentclass[12pt]{article}
\usepackage{mathdots}
\usepackage[bb=boondox]{mathalfa}
\usepackage{mathtools}
\usepackage{amssymb}
\usepackage{libertine}
\usepackage{graphicx}
\DeclareMathOperator*{\argmax}{arg\,max}
\DeclareMathOperator*{\argmin}{arg\,min}
\usepackage[paperheight=8in,paperwidth=4in,margin=.3in,heightrounded]{geometry}
\let\originalleft\left
\let\originalright\right
\renewcommand{\left}{\mathopen{}\mathclose\bgroup\originalleft}
\renewcommand{\right}{\aftergroup\egroup\originalright}
\begin{document}
\begin{center}
\resizebox{\textwidth}{!}
{
\begin{minipage}[c]{\textwidth}
\begin{align*}
\intertext{given}
\mathit{f} & \in \mathbb{R}^{ \mathit{n}} \\
\mathit{p} & \in \mathbb{R}^{ \mathit{n}} \\
\\
\omit \span \sum_\mathit{i} {\mathit{f}_{ \mathit{i} }}^{2}\mathit{p}_{ \mathit{i} } - {\left( \sum_\mathit{i} \mathit{f}_{ \mathit{i} }\mathit{p}_{ \mathit{i} } \right)}^{2} \\
\end{align*}
\end{minipage}
}
\end{center}
\end{document}
I❤️LA LaTeX output: