An example from Boyd & Vandenberghe, *Convex Optimization*, page 276.
The original equation: minimize_x ∑_i ‖A_i x + b_i‖₂ + (1/2)‖x − x₀‖₂²
I❤️LA implementation:
given
A_i ∈ ℝ^(m_i × n)
b_i ∈ ℝ^m_i
`x₀` ∈ ℝ^n
min_(x ∈ ℝ^n) ∑_i ‖A_i x + b_i‖ + (1/2)‖x-`x₀`‖²
I❤️LA compiled to Python/NumPy/SciPy:
"""
given
A_i ∈ ℝ^(m_i × n)
b_i ∈ ℝ^m_i
`x₀` ∈ ℝ^n
min_(x ∈ ℝ^n) ∑_i ‖A_i x + b_i‖ + (1/2)‖x-`x₀`‖²
"""
import numpy as np
import scipy
import scipy.linalg
from scipy import sparse
from scipy.integrate import quad
from scipy.optimize import minimize
class convex_optimization_276ResultType:
    """Container for the result of ``convex_optimization_276``.

    Attributes:
        ret: the optimal objective value found by the solver.
    """

    def __init__(self, ret):
        self.ret = ret

    def __repr__(self):
        # Aid debugging: the generated class previously printed as a bare
        # object address.
        return f"{type(self).__name__}(ret={self.ret!r})"
def convex_optimization_276(A, b, x0):
    """Solve  min_x  sum_i ||A_i x + b_i||_2 + (1/2) ||x - x0||_2^2.

    (Convex Optimization, p. 276.)

    Args:
        A: sequence of matrices, A[i] of shape (m_i, n); m_i may differ per block.
        b: sequence of vectors, b[i] of shape (m_i,).
        x0: vector of shape (n,).

    Returns:
        convex_optimization_276ResultType whose ``ret`` is the optimal
        objective value.
    """
    # Keep A and b as lists of per-block arrays.  The row counts m_i differ
    # between blocks, so stacking them with np.asarray(A) would build a
    # ragged array — an error on modern NumPy (>= 1.24).
    A = [np.asarray(A_i, dtype=np.float64) for A_i in A]
    b = [np.asarray(b_i, dtype=np.float64) for b_i in b]
    x0 = np.asarray(x0, dtype=np.float64)

    assert len(A) == len(b)
    n = A[0].shape[1]
    assert x0.shape == (n,)

    def objective(x):
        # sum_i ||A_i x + b_i||_2  +  (1/2) ||x - x0||_2^2
        data_term = sum(np.linalg.norm(A_i @ x + b_i) for A_i, b_i in zip(A, b))
        return data_term + 0.5 * np.linalg.norm(x - x0) ** 2

    ret = minimize(objective, np.zeros(n)).fun
    return convex_optimization_276ResultType(ret)
def generateRandomData():
    """Generate a random problem instance (A, b, x0).

    Dimensions are drawn from [1, 10] — never 0 — so that A is non-empty
    and every block has at least one row and column.  (The original drew
    from randint(10), which includes 0 and could produce an empty A,
    crashing the solver at ``A[0]``.)
    """
    dim_0 = np.random.randint(1, 11)  # number of blocks i
    n = np.random.randint(1, 11)      # shared column dimension
    A = []
    b = []
    for _ in range(dim_0):
        m_i = np.random.randint(1, 11)  # rows of this block
        A.append(np.random.randn(m_i, n))
        b.append(np.random.randn(m_i))
    x0 = np.random.randn(n)
    return A, b, x0
if __name__ == '__main__':
    # Demo: build a random instance, show it, solve it, report the optimum.
    A, b, x0 = generateRandomData()
    for label, value in (("A:", A), ("b:", b), ("x0:", x0)):
        print(label, value)
    func_value = convex_optimization_276(A, b, x0)
    print("return value: ", func_value.ret)
I❤️LA compiled to MATLAB:
function output = convex_optimization_276(A, b, x0)
% output = convex_optimization_276(A, b, `x₀`)
%
% Solve  min_x  sum_i ||A_i x + b_i||_2 + (1/2)||x - `x₀`||_2^2
%
% given
%     A_i ∈ ℝ^(m_i × n)   (cell array of matrices)
%     b_i ∈ ℝ^m_i         (cell array of column vectors)
%     `x₀` ∈ ℝ^n
    if nargin==0
        warning('generating random input data');
        [A, b, x0] = generateRandomData();
    end
    function [A, b, x0] = generateRandomData()
        % randi(10) is already in [1,10], so every dimension is non-zero.
        dim_0 = randi(10);
        n = randi(10);
        b = cell(dim_0, 1);
        A = cell(dim_0, 1);
        for i = 1:dim_0
            m_i = randi(10);
            % Bug fix: the generated code did  b = [b; randn(m_2)]  which
            % (a) concatenates a cell with a double matrix — a MATLAB
            % error — and (b) randn(m_2) is an m×m matrix, not a vector.
            b{i} = randn(m_i, 1);
            A{i} = randn(m_i, n);
        end
        x0 = randn(n, 1);
    end
    x0 = reshape(x0, [], 1);
    n = size(A{1}, 2);
    assert( numel(x0) == n );
    function ret = target_1(x)
        % Objective: sum_i ||A_i x + b_i||_2 + (1/2)||x - x0||_2^2
        sum_0 = 0;
        % numel(A) (not size(A,1)) so row cell arrays also work.
        for i = 1:numel(A)
            sum_0 = sum_0 + norm(A{i} * x + b{i}, 2);
        end
        ret = sum_0 + (1 / 2) * norm(x - x0, 2).^2;
    end
    % fminunc returns [xmin, fmin]; we only need the optimal value.
    [~, optimize_0] = fminunc(@target_1, zeros(n, 1));
    ret = optimize_0;
    output.ret = ret;
end
I❤️LA compiled to LaTeX:
% Auto-generated LaTeX rendering of the I❤️LA program (Convex Optimization, p. 276).
\documentclass[12pt]{article}
\usepackage{mathdots}
% boondox gives the blackboard-bold alphabet used for ℝ.
\usepackage[bb=boondox]{mathalfa}
\usepackage{mathtools}
\usepackage{amssymb}
\usepackage{libertine}
\DeclareMathOperator*{\argmax}{arg\,max}
\DeclareMathOperator*{\argmin}{arg\,min}
\usepackage[paperheight=8in,paperwidth=4in,margin=.3in,heightrounded]{geometry}
% Redefine \left/\right so the stretched delimiters do not add extra
% horizontal space around their contents.
\let\originalleft\left
\let\originalright\right
\renewcommand{\left}{\mathopen{}\mathclose\bgroup\originalleft}
\renewcommand{\right}{\aftergroup\egroup\originalright}
\begin{document}
\begin{center}
% Scale the whole align* block to the text width of the narrow page.
\resizebox{\textwidth}{!}
{
\begin{minipage}[c]{\textwidth}
\begin{align*}
\intertext{given}
\mathit{A}_{\mathit{i}} & \in \mathbb{R}^{ \mathit{m}_{\mathit{i}} \times \mathit{n} } \\
\mathit{b}_{\mathit{i}} & \in \mathbb{R}^{ \mathit{m}_{\mathit{i}}} \\
\textit{x₀} & \in \mathbb{R}^{ \mathit{n}} \\
\\
% The minimization: sum of residual norms plus proximal term to x₀.
\omit \span \begin{aligned} \min_{\mathit{x} \in \mathbb{R}^{ \mathit{n}}} \quad & \sum_\mathit{i} \left\|\mathit{A}_{ \mathit{i} }\mathit{x} + \mathit{b}_{ \mathit{i} }\right\|_2 + \left( \frac{1}{2} \right)\left\|\mathit{x} - \textit{x₀}\right\|_2^{2} \\
\end{aligned} \\
\end{align*}
\end{minipage}
}
\end{center}
\end{document}
I❤️LA LaTeX output: