/- Danskin-type derivative formula for the smoothed max-function.
   Asserts that fμ := SmoothedMaxFunction Q2 A phihat d2 μ has, at every
   point x, Fréchet derivative (AdjointOperator A') (uμ x), where A'
   repackages the continuous bilinear form A as a linear map
   E1 →ₗ[ℝ] (E2 →L[ℝ] ℝ).
   NOTE(review): this is a pretty-printed declaration dump — the `⋯`
   placeholders elide the `map_add'`/`map_smul'` proof fields of A'.
   No hypothesis here ties uμ to an actual maximizer (contrast the
   theorems below, which assume `IsSmoothedMaximizer`); presumably uμ x
   is the μ-smoothed maximizer at x — confirm against equation (2.6)
   of the source text before relying on this axiom. -/
axiom
smoothedMaxFunction_hasFDerivAt
{E1 : Type u_1}
{E2 : Type u_2}
[NormedAddCommGroup E1]
[NormedSpace ℝ E1]
[FiniteDimensional ℝ E1]
[NormedAddCommGroup E2]
[NormedSpace ℝ E2]
[FiniteDimensional ℝ E2]
(Q2 : Set E2)
(A : E1 →L[ℝ] E2 →L[ℝ] ℝ)
(phihat d2 : E2 → ℝ)
(μ : ℝ)
(uμ : E1 → E2)
:
let fμ := SmoothedMaxFunction Q2 A phihat d2 μ;
let A' := { toFun := fun (x : E1) => ↑(A x), map_add' := ⋯, map_smul' := ⋯ };
∀ (x : E1), HasFDerivAt fμ (LinearMap.toContinuousLinearMap ((AdjointOperator A') (uμ x))) x
-- Assumed differentiability of the smoothed max-function (Danskin-type formula).
/- Lipschitz continuity of the smoothed gradient map x ↦ A'* (uμ x),
   with constant Lμ = (1 / (μ * σ2)) * ‖A'‖².
   NOTE(review): the axiom takes μ and σ2 with NO positivity
   hypotheses. If μ * σ2 ≤ 0 then Lμ ≤ 0 and `Lμ.toNNReal = 0`, so the
   statement would force the gradient map to be constant — almost
   certainly not intended. The theorem `smoothedMaxFunction_properties`
   below does assume 0 < μ and 0 < σ2; consider adding the same
   hypotheses here. Also, as above, nothing relates uμ to the
   maximizer of the smoothing problem — confirm intended strength. -/
axiom
smoothedMaxFunction_gradient_lipschitz
{E1 : Type u_1}
{E2 : Type u_2}
[NormedAddCommGroup E1]
[NormedSpace ℝ E1]
[FiniteDimensional ℝ E1]
[NormedAddCommGroup E2]
[NormedSpace ℝ E2]
[FiniteDimensional ℝ E2]
(Q2 : Set E2)
(A : E1 →L[ℝ] E2 →L[ℝ] ℝ)
(phihat d2 : E2 → ℝ)
(μ σ2 : ℝ)
(uμ : E1 → E2)
:
let A' := { toFun := fun (x : E1) => ↑(A x), map_add' := ⋯, map_smul' := ⋯ };
∃ (Lμ : ℝ),
Lμ = 1 / (μ * σ2) * OperatorNormDef A' ^ 2 ∧ LipschitzWith Lμ.toNNReal fun (x : E1) => LinearMap.toContinuousLinearMap ((AdjointOperator A') (uμ x))
-- Assumed Lipschitz continuity of the smoothed max-function gradient.
/- C¹ smoothness of the smoothed max-function: `ContDiff ℝ 1` over all
   of E1 (the `↑1` is the coercion of 1 into the smoothness index type).
   NOTE(review): no hypotheses at all are assumed here — not even
   0 < μ or any convexity of d2/phihat — so this axiom is stated for
   arbitrary (possibly degenerate) data. Confirm this matches the
   intended scope; it is strictly stronger than Theorem 1.2.1's
   hypotheses below. -/
axiom
smoothedMaxFunction_contDiff
{E1 : Type u_1}
{E2 : Type u_2}
[NormedAddCommGroup E1]
[NormedSpace ℝ E1]
[FiniteDimensional ℝ E1]
[NormedAddCommGroup E2]
[NormedSpace ℝ E2]
[FiniteDimensional ℝ E2]
(Q2 : Set E2)
(A : E1 →L[ℝ] E2 →L[ℝ] ℝ)
(phihat d2 : E2 → ℝ)
(μ : ℝ)
:
ContDiff ℝ (↑1) (SmoothedMaxFunction Q2 A phihat d2 μ)
-- Assumed C^1 smoothness of the smoothed max-function.
/- Key inequality for the smoothed maximizers (hypotheses: μ > 0,
   d2 is σ2-strongly convex on Q2, phihat convex on Q2, and uμ x is a
   smoothed maximizer at every x).
   NOTE(review): the extraction is TRUNCATED — the theorem's conclusion
   after the final `:` is missing from this dump, so the actual
   inequality being claimed cannot be read here. Recover the full
   statement from the original source before using this declaration. -/
theorem
smoothedMaxFunction_key_inequality
{E1 : Type u_1}
{E2 : Type u_2}
[NormedAddCommGroup E1]
[NormedSpace ℝ E1]
[FiniteDimensional ℝ E1]
[NormedAddCommGroup E2]
[NormedSpace ℝ E2]
[FiniteDimensional ℝ E2]
(Q2 : Set E2)
(A : E1 →L[ℝ] E2 →L[ℝ] ℝ)
(phihat d2 : E2 → ℝ)
(μ σ2 : ℝ)
(hμ : 0 < μ)
(hconv : StrongConvexOn Q2 σ2 d2)
(hphi : ConvexOn ℝ Q2 phihat)
(uμ : E1 → E2)
(hmax : ∀ (x : E1), IsSmoothedMaximizer Q2 A phihat d2 μ x (uμ x))
:
-- Key inequality for the smoothed maximizers, assuming convexity of phihat.
/- Theorem 1.2.1 (bundled form): under 0 < μ, 0 < σ2, σ2-strong
   convexity of d2 on Q2, and uμ x being a smoothed maximizer for every
   x, the smoothed max-function fμ is (i) C¹, (ii) convex on Set.univ,
   (iii) differentiable with ∇fμ(x) = A'* (uμ x), and (iv) has a
   gradient that is Lipschitz with constant Lμ = (1/(μ σ2)) ‖A'‖².
   NOTE(review): parts (i), (iii), (iv) restate the three axioms above;
   presumably this theorem is proved from them, with convexity (ii) the
   genuinely new content — confirm in the proof source. Also note the
   pretty-printer shows `have` (not `let`) binders for fμ and A', so
   their definitions are opaque in the statement's type. -/
theorem
smoothedMaxFunction_properties
{E1 : Type u_1}
{E2 : Type u_2}
[NormedAddCommGroup E1]
[NormedSpace ℝ E1]
[FiniteDimensional ℝ E1]
[NormedAddCommGroup E2]
[NormedSpace ℝ E2]
[FiniteDimensional ℝ E2]
(Q2 : Set E2)
(A : E1 →L[ℝ] E2 →L[ℝ] ℝ)
(phihat d2 : E2 → ℝ)
(μ σ2 : ℝ)
(hμ : 0 < μ)
(hσ2 : 0 < σ2)
(hconv : StrongConvexOn Q2 σ2 d2)
(uμ : E1 → E2)
(hmax : ∀ (x : E1), IsSmoothedMaximizer Q2 A phihat d2 μ x (uμ x))
:
have fμ := SmoothedMaxFunction Q2 A phihat d2 μ;
have A' := { toFun := fun (x : E1) => ↑(A x), map_add' := ⋯, map_smul' := ⋯ };
ContDiff ℝ (↑1) fμ ∧ ConvexOn ℝ Set.univ fμ ∧ (∀ (x : E1), HasFDerivAt fμ (LinearMap.toContinuousLinearMap ((AdjointOperator A') (uμ x))) x) ∧ ∃ (Lμ : ℝ),
Lμ = 1 / (μ * σ2) * OperatorNormDef A' ^ 2 ∧ LipschitzWith Lμ.toNNReal fun (x : E1) => LinearMap.toContinuousLinearMap ((AdjointOperator A') (uμ x))
/- Theorem 1.2.1.
Assume μ > 0 and that d2 is σ2-strongly convex on Q2. Then f_μ defined by (2.5) is
well-defined and continuously differentiable for all x ∈ E1. Moreover, f_μ is convex and
∇ f_μ(x) = A* u_μ(x) (equation (2.6)). The gradient is Lipschitz continuous with constant
L_μ = (1/(μ σ2)) ‖A‖_{1,2}^2 (the unresolved label `eq:L_mu` in the original refers to
this Lipschitz-constant formula). -/