diff --git a/nonLean/Proof_finFarkasBartl_simplified.pdf b/FarkasBartl.pdf
similarity index 100%
rename from nonLean/Proof_finFarkasBartl_simplified.pdf
rename to FarkasBartl.pdf
diff --git a/README.md b/README.md
index 0dce97c..9f4b4d8 100644
--- a/README.md
+++ b/README.md
@@ -14,9 +14,5 @@ We extend certain Farkas-like theorems to a setting where coefficients are from
## Main results
-* [Farkas-Bartl theorem](https://github.com/madvorak/duality/blob/f8bb1a10ee5ff811b057cdcdb50b0b668d6e8d75/Duality/FarkasBartl.lean#L216)
+* [Farkas-Bartl theorem](https://github.com/madvorak/duality/blob/f8bb1a10ee5ff811b057cdcdb50b0b668d6e8d75/Duality/FarkasBartl.lean#L216) with [explanation](FarkasBartl.pdf)
* [Strong duality for extended LP](https://github.com/madvorak/duality/blob/f8bb1a10ee5ff811b057cdcdb50b0b668d6e8d75/Duality/LinearProgramming.lean#L1021)
-
-## Technical report
-
-[work in progress](https://github.com/madvorak/duality/blob/main/nonLean/duality.pdf)
diff --git a/nonLean/AI-generated.jpg b/nonLean/AI-generated.jpg
deleted file mode 100644
index 003253f..0000000
Binary files a/nonLean/AI-generated.jpg and /dev/null differ
diff --git a/nonLean/LinearOrderedField.dot b/nonLean/LinearOrderedField.dot
deleted file mode 100644
index 27acb8c..0000000
--- a/nonLean/LinearOrderedField.dot
+++ /dev/null
@@ -1,436 +0,0 @@
-digraph "instance_graph" {
- "Nontrivial" -> "Nonempty";
- "Semigroup" -> "Mul";
- "StrictOrderedCommRing" -> "CommRing";
- "StrictOrderedCommRing" -> "OrderedCommRing";
- "StrictOrderedCommRing" -> "StrictOrderedRing";
- "StrictOrderedCommRing" -> "StrictOrderedCommSemiring";
- "OrderedCommGroup" -> "PartialOrder";
- "OrderedCommGroup" -> "CommGroup";
- "OrderedCommGroup" -> "OrderedCancelCommMonoid";
- "Monoid" -> "Semigroup";
- "Monoid" -> "One";
- "Monoid" -> "MulOneClass";
- "NonUnitalNonAssocRing" -> "Mul";
- "NonUnitalNonAssocRing" -> "AddCommGroup";
- "NonUnitalNonAssocRing" -> "HasDistribNeg";
- "NonUnitalNonAssocRing" -> "NonUnitalNonAssocSemiring";
- "LinearOrderedCancelAddCommMonoid" -> "Ord";
- "LinearOrderedCancelAddCommMonoid" -> "Max";
- "LinearOrderedCancelAddCommMonoid" -> "LinearOrderedAddCommMonoid";
- "LinearOrderedCancelAddCommMonoid" -> "Min";
- "LinearOrderedCancelAddCommMonoid" -> "OrderedCancelAddCommMonoid";
- "SemigroupWithZero" -> "Semigroup";
- "SemigroupWithZero" -> "MulZeroClass";
- "SemigroupWithZero" -> "Zero";
- "Lattice" -> "SemilatticeInf";
- "Lattice" -> "SemilatticeSup";
- "Lattice" -> "Inf";
- "OrderedCommMonoid" -> "PartialOrder";
- "OrderedCommMonoid" -> "CommMonoid";
- "IsDomain" -> "Nontrivial";
- "IsDomain" -> "IsCancelMulZero";
- "OrderedAddCommMonoid" -> "PartialOrder";
- "OrderedAddCommMonoid" -> "AddCommMonoid";
- "CommRing" -> "AddCommGroupWithOne";
- "CommRing" -> "NonUnitalCommRing";
- "CommRing" -> "CommSemiring";
- "CommRing" -> "CommMonoid";
- "CommRing" -> "Ring";
- "StrictOrderedSemiring" -> "Nontrivial";
- "StrictOrderedSemiring" -> "NoMaxOrder";
- "StrictOrderedSemiring" -> "PartialOrder";
- "StrictOrderedSemiring" -> "OrderedSemiring";
- "StrictOrderedSemiring" -> "CharZero";
- "StrictOrderedSemiring" -> "OrderedCancelAddCommMonoid";
- "StrictOrderedSemiring" -> "Semiring";
- "AddCommGroupWithOne" -> "One";
- "AddCommGroupWithOne" -> "IntCast";
- "AddCommGroupWithOne" -> "AddCommMonoidWithOne";
- "AddCommGroupWithOne" -> "AddCommGroup";
- "AddCommGroupWithOne" -> "NatCast";
- "AddCommGroupWithOne" -> "AddGroupWithOne";
- "One" -> "Nonempty";
- "AddLeftCancelSemigroup" -> "AddSemigroup";
- "AddLeftCancelSemigroup" -> "IsLeftCancelAdd";
- "LinearOrderedCommGroup" -> "OrderedCommGroup";
- "LinearOrderedCommGroup" -> "Ord";
- "LinearOrderedCommGroup" -> "LinearOrder";
- "LinearOrderedCommGroup" -> "Max";
- "LinearOrderedCommGroup" -> "LinearOrderedCancelCommMonoid";
- "LinearOrderedCommGroup" -> "Min";
- "AddCancelCommMonoid" -> "AddLeftCancelMonoid";
- "AddCancelCommMonoid" -> "AddCancelMonoid";
- "AddCancelCommMonoid" -> "AddCommMonoid";
- "NonUnitalNonAssocCommSemiring" -> "CommMagma";
- "NonUnitalNonAssocCommSemiring" -> "NonUnitalNonAssocSemiring";
- "OrderedRing" -> "PartialOrder";
- "OrderedRing" -> "OrderedSemiring";
- "OrderedRing" -> "OrderedAddCommGroup";
- "OrderedRing" -> "Ring";
- "LinearOrderedCommSemiring" -> "LinearOrderedCancelAddCommMonoid";
- "LinearOrderedCommSemiring" -> "Ord";
- "LinearOrderedCommSemiring" -> "Max";
- "LinearOrderedCommSemiring" -> "Min";
- "LinearOrderedCommSemiring" -> "StrictOrderedCommSemiring";
- "LinearOrderedCommSemiring" -> "LinearOrderedSemiring";
- "LinearOrder" -> "Lattice";
- "LinearOrder" -> "Ord";
- "LinearOrder" -> "Max";
- "LinearOrder" -> "PartialOrder";
- "LinearOrder" -> "Min";
- "LinearOrder" -> "DistribLattice";
- "Unique" -> "Inhabited";
- "Unique" -> "Subsingleton";
- "CancelCommMonoidWithZero" -> "IsLeftCancelMulZero";
- "CancelCommMonoidWithZero" -> "CommMonoidWithZero";
- "CancelCommMonoidWithZero" -> "CancelMonoidWithZero";
- "DivisionCommMonoid" -> "DivisionMonoid";
- "DivisionCommMonoid" -> "CommMonoid";
- "GroupWithZero" -> "Nontrivial";
- "GroupWithZero" -> "Inv";
- "GroupWithZero" -> "Div";
- "GroupWithZero" -> "DivInvMonoid";
- "GroupWithZero" -> "MulDivCancelClass";
- "GroupWithZero" -> "MonoidWithZero";
- "IsCancelMul" -> "IsLeftCancelMul";
- "IsCancelMul" -> "IsRightCancelMul";
- "Group" -> "DivisionMonoid";
- "Group" -> "DivInvMonoid";
- "Group" -> "CancelMonoid";
- "SubNegMonoid" -> "Neg";
- "SubNegMonoid" -> "AddMonoid";
- "SubNegMonoid" -> "Sub";
- "DivInvOneMonoid" -> "DivInvMonoid";
- "DivInvOneMonoid" -> "InvOneClass";
- "AddCommMagma" -> "Add";
- "AddLeftCancelMonoid" -> "AddLeftCancelSemigroup";
- "AddLeftCancelMonoid" -> "AddMonoid";
- "AddLeftCancelMonoid" -> "Zero";
- "LawfulBEq" -> "EquivBEq";
- "AddCommMonoidWithOne" -> "AddCommMonoid";
- "AddCommMonoidWithOne" -> "AddMonoidWithOne";
- "MulZeroClass" -> "Mul";
- "MulZeroClass" -> "Zero";
- "Lean.Meta.FastIsEmpty" -> "Lean.Meta.FastSubsingleton";
- "LinearOrderedRing" -> "Ord";
- "LinearOrderedRing" -> "LinearOrder";
- "LinearOrderedRing" -> "Max";
- "LinearOrderedRing" -> "Min";
- "LinearOrderedRing" -> "LinearOrderedAddCommGroup";
- "LinearOrderedRing" -> "StrictOrderedRing";
- "LinearOrderedRing" -> "LinearOrderedSemiring";
- "AddCancelMonoid" -> "AddLeftCancelMonoid";
- "AddCancelMonoid" -> "AddRightCancelMonoid";
- "AddCancelMonoid" -> "IsCancelAdd";
- "PartialOrder" -> "Preorder";
- "AddCommGroup" -> "AddCancelCommMonoid";
- "AddCommGroup" -> "AddCommMonoid";
- "AddCommGroup" -> "AddGroup";
- "AddCommGroup" -> "SubtractionCommMonoid";
- "DivisionMonoid" -> "DivInvOneMonoid";
- "DivisionMonoid" -> "InvolutiveInv";
- "DivisionMonoid" -> "DivInvMonoid";
- "OrderedSemiring" -> "OrderedAddCommMonoid";
- "OrderedSemiring" -> "PartialOrder";
- "OrderedSemiring" -> "ZeroLEOneClass";
- "OrderedSemiring" -> "Semiring";
- "Semifield" -> "Nontrivial";
- "Semifield" -> "Inv";
- "Semifield" -> "NNRatCast";
- "Semifield" -> "Div";
- "Semifield" -> "CommSemiring";
- "Semifield" -> "DivisionSemiring";
- "Semifield" -> "CommGroupWithZero";
- "Setoid" -> "HasEquiv";
- "NonAssocRing" -> "NonUnitalNonAssocRing";
- "NonAssocRing" -> "AddCommGroupWithOne";
- "NonAssocRing" -> "One";
- "NonAssocRing" -> "IntCast";
- "NonAssocRing" -> "NatCast";
- "NonAssocRing" -> "NonAssocSemiring";
- "CommMagma" -> "Mul";
- "NonUnitalCommRing" -> "NonUnitalNonAssocCommRing";
- "NonUnitalCommRing" -> "NonUnitalRing";
- "NonUnitalCommRing" -> "NonUnitalCommSemiring";
- "LinearOrderedAddCommMonoid" -> "OrderedAddCommMonoid";
- "LinearOrderedAddCommMonoid" -> "Ord";
- "LinearOrderedAddCommMonoid" -> "LinearOrder";
- "LinearOrderedAddCommMonoid" -> "Max";
- "LinearOrderedAddCommMonoid" -> "Min";
- "Bot" -> "Nonempty";
- "MulZeroOneClass" -> "MulZeroClass";
- "MulZeroOneClass" -> "MulOneClass";
- "MulZeroOneClass" -> "Zero";
- "CommGroup" -> "DivisionCommMonoid";
- "CommGroup" -> "Group";
- "CommGroup" -> "CommMonoid";
- "CommGroup" -> "CancelCommMonoid";
- "IsEmpty" -> "Subsingleton";
- "Std.ToFormat" -> "Lean.ToMessageData";
- "IsCancelMulZero" -> "IsLeftCancelMulZero";
- "IsCancelMulZero" -> "IsRightCancelMulZero";
- "LinearOrderedCancelCommMonoid" -> "Ord";
- "LinearOrderedCancelCommMonoid" -> "Max";
- "LinearOrderedCancelCommMonoid" -> "Min";
- "LinearOrderedCancelCommMonoid" -> "LinearOrderedCommMonoid";
- "LinearOrderedCancelCommMonoid" -> "OrderedCancelCommMonoid";
- "AddSemigroup" -> "Add";
- "LinearOrderedSemifield" -> "Inv";
- "LinearOrderedSemifield" -> "NNRatCast";
- "LinearOrderedSemifield" -> "LinearOrderedCommSemiring";
- "LinearOrderedSemifield" -> "Semifield";
- "LinearOrderedSemifield" -> "Div";
- "RightCancelMonoid" -> "Monoid";
- "RightCancelMonoid" -> "One";
- "RightCancelMonoid" -> "RightCancelSemigroup";
- "OrderedCommRing" -> "CommRing";
- "OrderedCommRing" -> "OrderedRing";
- "OrderedCommRing" -> "OrderedCommSemiring";
- "InvolutiveInv" -> "Inv";
- "DivInvMonoid" -> "Monoid";
- "DivInvMonoid" -> "Inv";
- "DivInvMonoid" -> "Div";
- "InvOneClass" -> "One";
- "InvOneClass" -> "Inv";
- "LeftCancelSemigroup" -> "Semigroup";
- "LeftCancelSemigroup" -> "IsLeftCancelMul";
- "AddCommMonoid" -> "AddMonoid";
- "AddCommMonoid" -> "AddCommSemigroup";
- "Lean.Eval" -> "Lean.MetaEval";
- "LinearOrderedCommRing" -> "StrictOrderedCommRing";
- "LinearOrderedCommRing" -> "LinearOrderedCommSemiring";
- "LinearOrderedCommRing" -> "LinearOrderedRing";
- "LinearOrderedCommRing" -> "CommMonoid";
- "CommSemiring" -> "CommMonoidWithZero";
- "CommSemiring" -> "Semiring";
- "CommSemiring" -> "NonUnitalCommSemiring";
- "CommSemiring" -> "CommMonoid";
- "AddRightCancelMonoid" -> "AddRightCancelSemigroup";
- "AddRightCancelMonoid" -> "AddMonoid";
- "AddRightCancelMonoid" -> "Zero";
- "CommSemigroup" -> "Semigroup";
- "CommSemigroup" -> "CommMagma";
- "NonUnitalNonAssocCommRing" -> "NonUnitalNonAssocRing";
- "NonUnitalNonAssocCommRing" -> "NonUnitalNonAssocCommSemiring";
- "DivisionSemiring" -> "Nontrivial";
- "DivisionSemiring" -> "Inv";
- "DivisionSemiring" -> "NNRatCast";
- "DivisionSemiring" -> "GroupWithZero";
- "DivisionSemiring" -> "Div";
- "DivisionSemiring" -> "Semiring";
- "OrderedCommSemiring" -> "OrderedSemiring";
- "OrderedCommSemiring" -> "CommSemiring";
- "DistribLattice" -> "Lattice";
- "InvolutiveNeg" -> "Neg";
- "AddGroup" -> "SubNegMonoid";
- "AddGroup" -> "AddCancelMonoid";
- "AddGroup" -> "SubtractionMonoid";
- "AddZeroClass" -> "Add";
- "AddZeroClass" -> "Zero";
- "SemilatticeInf" -> "PartialOrder";
- "SemilatticeInf" -> "Inf";
- "MonoidWithZero" -> "Monoid";
- "MonoidWithZero" -> "SemigroupWithZero";
- "MonoidWithZero" -> "MulZeroOneClass";
- "MonoidWithZero" -> "Zero";
- "LeftCancelMonoid" -> "Monoid";
- "LeftCancelMonoid" -> "One";
- "LeftCancelMonoid" -> "LeftCancelSemigroup";
- "SubNegZeroMonoid" -> "SubNegMonoid";
- "SubNegZeroMonoid" -> "NegZeroClass";
- "NonAssocSemiring" -> "One";
- "NonAssocSemiring" -> "AddCommMonoidWithOne";
- "NonAssocSemiring" -> "NatCast";
- "NonAssocSemiring" -> "MulZeroOneClass";
- "NonAssocSemiring" -> "NonUnitalNonAssocSemiring";
- "EquivBEq" -> "ReflBEq";
- "EquivBEq" -> "PartialEquivBEq";
- "AddRightCancelSemigroup" -> "IsRightCancelAdd";
- "AddRightCancelSemigroup" -> "AddSemigroup";
- "SubtractionMonoid" -> "SubNegMonoid";
- "SubtractionMonoid" -> "InvolutiveNeg";
- "SubtractionMonoid" -> "SubNegZeroMonoid";
- "NonUnitalSemiring" -> "SemigroupWithZero";
- "NonUnitalSemiring" -> "NonUnitalNonAssocSemiring";
- "CommMonoidWithZero" -> "MonoidWithZero";
- "CommMonoidWithZero" -> "CommMonoid";
- "CommMonoidWithZero" -> "Zero";
- "OrderedCancelAddCommMonoid" -> "OrderedAddCommMonoid";
- "OrderedCancelAddCommMonoid" -> "AddCancelCommMonoid";
- "Distrib" -> "Mul";
- "Distrib" -> "LeftDistribClass";
- "Distrib" -> "Add";
- "Distrib" -> "RightDistribClass";
- "LinearOrderedAddCommGroup" -> "LinearOrderedCancelAddCommMonoid";
- "LinearOrderedAddCommGroup" -> "Ord";
- "LinearOrderedAddCommGroup" -> "LinearOrder";
- "LinearOrderedAddCommGroup" -> "Max";
- "LinearOrderedAddCommGroup" -> "Min";
- "LinearOrderedAddCommGroup" -> "OrderedAddCommGroup";
- "CommGroupWithZero" -> "Nontrivial";
- "CommGroupWithZero" -> "Inv";
- "CommGroupWithZero" -> "GroupWithZero";
- "CommGroupWithZero" -> "Div";
- "CommGroupWithZero" -> "CommMonoidWithZero";
- "ToString" -> "Std.ToFormat";
- "ToString" -> "Lean.Eval";
- "AddMonoid" -> "AddSemigroup";
- "AddMonoid" -> "AddZeroClass";
- "AddMonoid" -> "Zero";
- "IsCancelAdd" -> "IsRightCancelAdd";
- "IsCancelAdd" -> "IsLeftCancelAdd";
- "LinearOrderedCommMonoid" -> "OrderedCommMonoid";
- "LinearOrderedCommMonoid" -> "Ord";
- "LinearOrderedCommMonoid" -> "LinearOrder";
- "LinearOrderedCommMonoid" -> "Max";
- "LinearOrderedCommMonoid" -> "Min";
- "CancelMonoid" -> "IsCancelMul";
- "CancelMonoid" -> "RightCancelMonoid";
- "CancelMonoid" -> "LeftCancelMonoid";
- "HasDistribNeg" -> "InvolutiveNeg";
- "Semiring" -> "One";
- "Semiring" -> "NatCast";
- "Semiring" -> "MonoidWithZero";
- "Semiring" -> "NonAssocSemiring";
- "Semiring" -> "NonUnitalSemiring";
- "Inhabited" -> "Nonempty";
- "AddGroupWithOne" -> "IntCast";
- "AddGroupWithOne" -> "Neg";
- "AddGroupWithOne" -> "AddGroup";
- "AddGroupWithOne" -> "AddMonoidWithOne";
- "AddGroupWithOne" -> "Sub";
- "StrictOrderedRing" -> "Nontrivial";
- "StrictOrderedRing" -> "StrictOrderedSemiring";
- "StrictOrderedRing" -> "OrderedRing";
- "StrictOrderedRing" -> "PartialOrder";
- "StrictOrderedRing" -> "OrderedAddCommGroup";
- "StrictOrderedRing" -> "Ring";
- "DivisionRing" -> "Nontrivial";
- "DivisionRing" -> "Inv";
- "DivisionRing" -> "NNRatCast";
- "DivisionRing" -> "Div";
- "DivisionRing" -> "DivInvMonoid";
- "DivisionRing" -> "DivisionSemiring";
- "DivisionRing" -> "RatCast";
- "DivisionRing" -> "Ring";
- "SizeOf" -> "WellFoundedRelation";
- "NonUnitalRing" -> "NonUnitalNonAssocRing";
- "NonUnitalRing" -> "NonUnitalSemiring";
- "NonUnitalCommSemiring" -> "CommSemigroup";
- "NonUnitalCommSemiring" -> "NonUnitalSemiring";
- "AddCommSemigroup" -> "AddCommMagma";
- "AddCommSemigroup" -> "AddSemigroup";
- "MulOneClass" -> "One";
- "MulOneClass" -> "Mul";
- "SubtractionCommMonoid" -> "AddCommMonoid";
- "SubtractionCommMonoid" -> "SubtractionMonoid";
- "RightCancelSemigroup" -> "Semigroup";
- "RightCancelSemigroup" -> "IsRightCancelMul";
- "NonUnitalNonAssocSemiring" -> "Mul";
- "NonUnitalNonAssocSemiring" -> "MulZeroClass";
- "NonUnitalNonAssocSemiring" -> "AddCommMonoid";
- "NonUnitalNonAssocSemiring" -> "Distrib";
- "NegZeroClass" -> "Neg";
- "NegZeroClass" -> "Zero";
- "OrderedAddCommGroup" -> "PartialOrder";
- "OrderedAddCommGroup" -> "AddCommGroup";
- "OrderedAddCommGroup" -> "OrderedCancelAddCommMonoid";
- "StrictOrderedCommSemiring" -> "StrictOrderedSemiring";
- "StrictOrderedCommSemiring" -> "CommSemiring";
- "StrictOrderedCommSemiring" -> "OrderedCommSemiring";
- "Repr" -> "ReprTuple";
- "Repr" -> "Lean.Eval";
- "OrderedCancelCommMonoid" -> "OrderedCommMonoid";
- "OrderedCancelCommMonoid" -> "CancelCommMonoid";
- "LinearOrderedSemiring" -> "StrictOrderedSemiring";
- "LinearOrderedSemiring" -> "Ord";
- "LinearOrderedSemiring" -> "Max";
- "LinearOrderedSemiring" -> "LinearOrderedAddCommMonoid";
- "LinearOrderedSemiring" -> "Min";
- "CommMonoid" -> "Monoid";
- "CommMonoid" -> "CommSemigroup";
- "SemilatticeSup" -> "PartialOrder";
- "SemilatticeSup" -> "Sup";
- "Field" -> "Nontrivial";
- "Field" -> "CommRing";
- "Field" -> "Inv";
- "Field" -> "NNRatCast";
- "Field" -> "Semifield";
- "Field" -> "Div";
- "Field" -> "RatCast";
- "Field" -> "DivisionRing";
- "Ring" -> "IntCast";
- "Ring" -> "AddCommGroup";
- "Ring" -> "NonAssocRing";
- "Ring" -> "Neg";
- "Ring" -> "Semiring";
- "Ring" -> "AddGroupWithOne";
- "Ring" -> "NonUnitalRing";
- "Ring" -> "Sub";
- "Top" -> "Nonempty";
- "CancelMonoidWithZero" -> "IsCancelMulZero";
- "CancelMonoidWithZero" -> "MonoidWithZero";
- "LinearOrderedField" -> "Inv";
- "LinearOrderedField" -> "NNRatCast";
- "LinearOrderedField" -> "Div";
- "LinearOrderedField" -> "LinearOrderedSemifield";
- "LinearOrderedField" -> "LinearOrderedCommRing";
- "LinearOrderedField" -> "RatCast";
- "LinearOrderedField" -> "Field";
- "Preorder" -> "LE";
- "Preorder" -> "LT";
- "AddMonoidWithOne" -> "One";
- "AddMonoidWithOne" -> "NatCast";
- "AddMonoidWithOne" -> "AddMonoid";
- "CancelCommMonoid" -> "LeftCancelMonoid";
- "CancelCommMonoid" -> "CancelMonoid";
- "CancelCommMonoid" -> "CommMonoid";
- "Zero" -> "Nonempty";
- "IsDomain" -> "Semiring" [style=dashed];
- "IsRightCancelAdd" -> "Add" [style=dashed];
- "ExistsMulOfLE" -> "Mul" [style=dashed];
- "ExistsMulOfLE" -> "LE" [style=dashed];
- "NoMinOrder" -> "LT" [style=dashed];
- "NoMaxOrder" -> "LT" [style=dashed];
- "IsCancelMul" -> "Mul" [style=dashed];
- "IsLeftCancelMul" -> "Mul" [style=dashed];
- "LawfulBEq" -> "BEq" [style=dashed];
- "OrderedSub" -> "LE" [style=dashed];
- "OrderedSub" -> "Add" [style=dashed];
- "OrderedSub" -> "Sub" [style=dashed];
- "ZeroLEOneClass" -> "One" [style=dashed];
- "ZeroLEOneClass" -> "LE" [style=dashed];
- "ZeroLEOneClass" -> "Zero" [style=dashed];
- "NoTopOrder" -> "LE" [style=dashed];
- "IsCancelMulZero" -> "Mul" [style=dashed];
- "IsCancelMulZero" -> "Zero" [style=dashed];
- "LawfulHashable" -> "Hashable" [style=dashed];
- "LawfulHashable" -> "BEq" [style=dashed];
- "CharZero" -> "AddMonoidWithOne" [style=dashed];
- "ExistsAddOfLE" -> "LE" [style=dashed];
- "ExistsAddOfLE" -> "Add" [style=dashed];
- "IsLeftCancelMulZero" -> "Mul" [style=dashed];
- "IsLeftCancelMulZero" -> "Zero" [style=dashed];
- "MulDivCancelClass" -> "Div" [style=dashed];
- "MulDivCancelClass" -> "MonoidWithZero" [style=dashed];
- "NoZeroDivisors" -> "Mul" [style=dashed];
- "NoZeroDivisors" -> "Zero" [style=dashed];
- "EquivBEq" -> "BEq" [style=dashed];
- "NoBotOrder" -> "LE" [style=dashed];
- "IsLeftCancelAdd" -> "Add" [style=dashed];
- "LeftDistribClass" -> "Mul" [style=dashed];
- "LeftDistribClass" -> "Add" [style=dashed];
- "ReflBEq" -> "BEq" [style=dashed];
- "IsCancelAdd" -> "Add" [style=dashed];
- "PartialEquivBEq" -> "BEq" [style=dashed];
- "HasDistribNeg" -> "Mul" [style=dashed];
- "DenselyOrdered" -> "LT" [style=dashed];
- "IsCyclic" -> "Pow" [style=dashed];
- "IsAddCyclic" -> "SMul" [style=dashed];
- "IsRightCancelMul" -> "Mul" [style=dashed];
- "IsRightCancelMulZero" -> "Mul" [style=dashed];
- "IsRightCancelMulZero" -> "Zero" [style=dashed];
- "RightDistribClass" -> "Mul" [style=dashed];
- "RightDistribClass" -> "Add" [style=dashed];
-}
\ No newline at end of file
diff --git a/nonLean/LinearOrderedField.svg b/nonLean/LinearOrderedField.svg
deleted file mode 100644
index 1cf884d..0000000
--- a/nonLean/LinearOrderedField.svg
+++ /dev/null
@@ -1,3697 +0,0 @@
-
\ No newline at end of file
diff --git a/nonLean/Proof_finFarkasBartl.pdf b/nonLean/Proof_finFarkasBartl.pdf
deleted file mode 100644
index 8801943..0000000
Binary files a/nonLean/Proof_finFarkasBartl.pdf and /dev/null differ
diff --git a/nonLean/Proof_finFarkasBartl.tex b/nonLean/Proof_finFarkasBartl.tex
deleted file mode 100644
index f0f92f4..0000000
--- a/nonLean/Proof_finFarkasBartl.tex
+++ /dev/null
@@ -1,285 +0,0 @@
-\documentclass[]{article}
-\usepackage[portrait, margin=5mm]{geometry}
-\usepackage{amsmath}
-\usepackage{amssymb}
-\usepackage{amsfonts}
-\usepackage{listings}
-\usepackage{graphicx}
-\pagenumbering{gobble}
-
-\renewcommand{\.}{\hskip .75pt}
-
-\renewcommand{\arraystretch}{1.25}
-
-\newcommand{\fin}[1]{[\.#1\.]}
-
-\DeclareMathOperator{\aand}{\;\wedge\;}
-\DeclareMathOperator{\st}{,\;}
-\DeclareMathOperator{\ex}{\,\exists}
-
-\let\r=\rightarrow
-\let\*=\cdot
-
-\begin{document}
-
-\lstset{
- basicstyle=\ttfamily\small,
- literate=
- {→}{{$\rightarrow$}}1
- {∀}{{$\forall$}}1
- {∃}{{$\exists$}}1
- {×}{{$\times$}}1
- {σ}{{$\sigma$}}1
- {τ}{{$\tau$}}1
- {α}{{$\alpha$}}1
- {γ}{{$\gamma$}}1
- {≠}{{$\neq$}}1
- {≤}{{$\leq$}}1
- {≥}{{$\geq$}}1
- {↔}{{$\leftrightarrow$}}1
- {¬}{{$\neg$}}1
- {∧}{{$\wedge$}}1
- {∨}{{$\vee$}}1
- {•}{$\bullet$}1
- {·}{$\cdot$}1
- {⬝}{$\cdot$}1
- {ℕ}{{$\mathbb{N}$}}1
- {ℤ}{{$\mathbb{Z}$}}1
- {∈}{{$\in$}}1
- {ₗ}{{$_l$}}1
- {₀}{{$_0$}}1
- {∑}{{$\;\sum$}}1
- {ᵀ}{{$^\texttt{T}$}}1
- {ᵥ}{{$_v$}}1
- {ₘ}{{$_m$}}1
- {⁻¹}{{$^{-1}$}}1
- {∞}{{$\infty$}}1
- {⊤}{{$\top$}}1
- {⊥}{{$\bot$}}1
- {⟨}{{$\langle$}}1
- {⟩}{{$\rangle$}}1
- {∘}{{$\circ$}}1
- {▸}{{$\triangleright$}}1
-}
-
-
-\noindent In this section, we prove:
-\begin{lstlisting}
-theorem finFarkasBartl {n : ℕ} [LinearOrderedDivisionRing R]
- [LinearOrderedAddCommGroup V] [Module R V] [PosSMulMono R V] [AddCommGroup W] [Module R W]
- (A : W →ₗ[R] Fin n → R) (b : W →ₗ[R] V) :
- (∃ x : Fin n → V, 0 ≤ x ∧ ∀ w : W, ∑ j : Fin n, A w j • x j = b w) ≠ (∃ y : W, 0 ≤ A y ∧ b y < 0)
-\end{lstlisting}
-We first rephrase the goal to:
-\begin{lstlisting}
-(∃ x : Fin n → V, 0 ≤ x ∧ ∀ w : W, ∑ j : Fin n, A w j • x j = b w) ↔ (∀ y : W, 0 ≤ A y → 0 ≤ b y)
-\end{lstlisting}
-Implication from left to right is satisfied by the following term:
-\begin{lstlisting}
-fun ⟨x, hx, hb⟩ y hy => hb y ▸ Finset.sum_nonneg (fun i _ => smul_nonneg (hy i) (hx i))
-\end{lstlisting}
-Implication from right to left will be proved by induction on \texttt{n} with generalized \texttt{A} and \texttt{b}.
-In case \texttt{n = 0} we immediately have:
-\begin{lstlisting}
-A_tauto (w : W) : 0 ≤ A w
-\end{lstlisting}
-We have an assumption:
-\begin{lstlisting}
-hAb : ∀ y : W, 0 ≤ A y → 0 ≤ b y
-\end{lstlisting}
-We set \texttt{x} to be the empty vector family. Now, for every \texttt{w :~W}, we must prove:
-\begin{lstlisting}
-∑ j : Fin 0, A w j • (0 : Fin 0 → V) j = b w
-\end{lstlisting}
-We simplify the goal to:
-\begin{lstlisting}
-0 = b w
-\end{lstlisting}
-We utilize that \texttt{V} is ordered and prove the equality as two inequalities.
-Inequality \texttt{0 $\le$ b w} is directly satisfied by:
-\begin{lstlisting}
-hAb w (A_tauto w)
-\end{lstlisting}
-Inequality \texttt{b w $\le$ 0} is easily reduced (using \texttt{b (-w) = -(b w)}, which holds by linearity of \texttt{b}) to:
-\begin{lstlisting}
-hAb (-w) (A_tauto (-w))
-\end{lstlisting}
-The induction step is stated as a lemma:
-\begin{lstlisting}
-lemma industepFarkasBartl {m : ℕ} [LinearOrderedDivisionRing R]
- [LinearOrderedAddCommGroup V] [Module R V] [PosSMulMono R V] [AddCommGroup W] [Module R W]
- (ih : ∀ A₀ : W →ₗ[R] Fin m → R, ∀ b₀ : W →ₗ[R] V,
- (∀ y₀ : W, 0 ≤ A₀ y₀ → 0 ≤ b₀ y₀) →
- (∃ x₀ : Fin m → V, 0 ≤ x₀ ∧ ∀ w₀ : W, ∑ i₀ : Fin m, A₀ w₀ i₀ • x₀ i₀ = b₀ w₀))
- {A : W →ₗ[R] Fin m.succ → R} {b : W →ₗ[R] V} (hAb : ∀ y : W, 0 ≤ A y → 0 ≤ b y) :
- ∃ x : Fin m.succ → V, 0 ≤ x ∧ ∀ w : W, ∑ i : Fin m.succ, A w i • x i = b w
-\end{lstlisting}
-First we introduce an auxiliary definition. We define
-\begin{lstlisting}
-a : W →ₗ[R] Fin m → R
-\end{lstlisting}
-as the first \texttt{m} rows of \texttt{A}, i.e., \texttt{A} without the last row:
-\begin{lstlisting}
-a := (fun w : W => fun i : Fin m => A w i.castSucc)
-\end{lstlisting}
-To prove \texttt{industepFarkasBartl} we first consider the easy case:
-\begin{lstlisting}
-is_easy : ∀ y : W, 0 ≤ a y → 0 ≤ b y
-\end{lstlisting}
-From \texttt{ih a b is\_easy} we obtain:
-\begin{lstlisting}
-x : Fin m → V
-hx : 0 ≤ x
-hxb : ∀ w₀ : W, ∑ i₀ : Fin m, a w₀ i₀ • x i₀ = b w₀
-\end{lstlisting}
-The lemma is satisfied by this vector family:
-\begin{lstlisting}
-(fun i : Fin m.succ => if hi : i.val < m then x ⟨i.val, hi⟩ else 0)
-\end{lstlisting}
-Easy case analysis shows that the vector family is nonnegative. In order to prove that, given \texttt{w :~W} in the context,
-\begin{lstlisting}
-∑ i : Fin m.succ, A w i • (fun i : Fin m.succ => if hi : i.val < m then x ⟨i.val, hi⟩ else 0) i = b w
-\end{lstlisting}
-holds, we first transform the goal to:
-\begin{lstlisting}
-∑ i ∈ (Finset.univ.filter (fun k : Fin m.succ => k.val < m)).attach, A w i.val • x ⟨i.val.val, _⟩ = b w
-\end{lstlisting}
-We compare it with \texttt{hxb w} which says:
-\begin{lstlisting}
-∑ i₀ : Fin m, a w i₀ • x i₀ = b w
-\end{lstlisting} \pagebreak[2]
-We finish the proof for the easy case using the following technical lemma (which will also be used in one more place):
-\begin{lstlisting}
-private lemma finishing_piece {m : ℕ} [Semiring R]
- [AddCommMonoid V] [Module R V] [AddCommMonoid W] [Module R W]
- {A : W →ₗ[R] Fin m.succ → R} {w : W} {x : Fin m → V} :
- ∑ i : Fin m, a w i • x i =
- ∑ i : { j : Fin m.succ // j ∈ Finset.univ.filter (·.val < m) }, A w i.val • x ⟨i.val.val, by aesop⟩
-\end{lstlisting}
-Now for the hard case; negation of \texttt{is\_easy} gives us:
-\begin{lstlisting}
-y' : W
-hay' : 0 ≤ a y'
-hby' : b y' < 0
-\end{lstlisting}
-Let us make an alias for the last (new) index, i.e., the term \texttt{M} is just
-the number \texttt{m} converted to the type \texttt{Fin (m+1)}:
-\begin{lstlisting}
-M : Fin m.succ := ⟨m, lt_add_one m⟩
-\end{lstlisting}
-Let \texttt{y} be flipped and rescaled \texttt{y'} as follows:
-\begin{lstlisting}
-y : W := (A y' M)⁻¹ • y'
-\end{lstlisting}
-From \texttt{hAb} we get:
-\begin{lstlisting}
-hAy' : A y' M < 0
-\end{lstlisting}
-Therefore \texttt{hAy'.ne :~A y' M $\neq$ 0}
-implies that \texttt{y} has the property that motivated the rescaling:
-\begin{lstlisting}
-hAy : A y M = 1
-\end{lstlisting}
-From \texttt{hAy} we have:
-\begin{lstlisting}
-hAA : ∀ w : W, A (w - (A w M • y)) M = 0
-\end{lstlisting}
-Using \texttt{hAA} and \texttt{hAb} we prove:
-\begin{lstlisting}
-hbA : ∀ w : W, 0 ≤ a (w - (A w M • y)) → 0 ≤ b (w - (A w M • y))
-\end{lstlisting}
-From \texttt{hbA} we have:
-\begin{lstlisting}
-hbAb : ∀ w : W, 0 ≤ (a - (A · M • a y)) w → 0 ≤ (b - (A · M • b y)) w
-\end{lstlisting}
-We observe that these two terms (appearing in \texttt{hbAb} we just proved) are linear maps:
-\begin{lstlisting}
-(a - (A · M • a y))
-(b - (A · M • b y))
-\end{lstlisting}
-Therefore, we can plug them into \texttt{ih} and provide \texttt{hbAb} as the last argument.
-We obtain:
-\begin{lstlisting}
-x' : Fin m → V
-hx' : 0 ≤ x'
-hxb' : ∀ w₀ : W, ∑ i₀ : Fin m, (a - (A · M • a y)) w₀ i₀ • x' i₀ = (b - (A · M • b y)) w₀
-\end{lstlisting}
-We claim that our lemma is satisfied by this vector family:
-\begin{lstlisting}
-(fun i : Fin m.succ => if hi : i.val < m then x' ⟨i.val, hi⟩ else b y - ∑ i : Fin m, a y i • x' i)
-\end{lstlisting}
-Let us show the nonnegativity first.
-Nonnegativity of everything except for the last vector follows from \texttt{hx'}.
-From \texttt{hAy'} we have:
-\begin{lstlisting}
-hAy'' : (A y' M)⁻¹ ≤ 0
-\end{lstlisting}
-From \texttt{hAy''} with \texttt{hay'} we have:
-\begin{lstlisting}
-hay : a y ≤ 0
-\end{lstlisting}
-From \texttt{hAy''} with \texttt{hby'} converted to nonstrict inequality we have:
-\begin{lstlisting}
-hby : 0 ≤ b y
-\end{lstlisting}
-For the nonnegativity of the last vector, we need to prove:
-\begin{lstlisting}
-∑ i : Fin m, a y i • x' i ≤ b y
-\end{lstlisting}
-It follows from \texttt{hay i} with \texttt{hx' i} and \texttt{hby} using basic properties of inequalities:
-each term \texttt{a y i • x' i} is nonpositive (a nonpositive scalar acting on a nonnegative vector),
-so the sum is at most \texttt{0}, which in turn is at most \texttt{b y}.
-The only remaining task is to show:
-\begin{lstlisting}
-∀ w : W,
- ∑ i : Fin m.succ, (A w i •
- (if hi : i.val < m then x' ⟨i.val, hi⟩ else b y - ∑ i : Fin m, a y i • x' i)
- ) =
- b w
-\end{lstlisting}
-Given general \texttt{w :~W} we make a key observation (using \texttt{hxb' w}):
-\begin{lstlisting}
-haAa : ∑ i : Fin m, (a w i - A w M * a y i) • x' i = b w - A w M • b y
-\end{lstlisting}
-With the help of \texttt{haAa} we transform the goal to:
-\begin{lstlisting}
-∑ i : Fin m.succ,
- (A w i • (if hi : i.val < m then x' ⟨i.val, hi⟩ else b y - ∑ i' : Fin m, a y i' • x' i')) =
-∑ i : Fin m, (a w i - A w M * a y i) • x' i + A w M • b y
-\end{lstlisting}
-From here, the direction should be clear; the rest of the proof is just manipulation
-of the goal, without any additional hypotheses.
-We distribute $\bullet$ over \texttt{if} so that the goal becomes:
-\begin{lstlisting}
-∑ i : Fin m.succ,
- (if hi : i.val < m then A w i • x' ⟨i.val, hi⟩ else A w i • (b y - ∑ i' : Fin m, a y i' • x' i')) =
-∑ i : Fin m, (a w i - A w M * a y i) • x' i + A w M • b y
-\end{lstlisting}
-We split the left-hand side into two parts:
-\begin{lstlisting}
-∑ i ∈ (Finset.univ.filter (fun i : Fin m.succ => i.val < m)).attach,
- A w i.val • x' ⟨i.val.val, _⟩ +
-∑ i ∈ (Finset.univ.filter (fun i : Fin m.succ => ¬(i.val < m))).attach,
- A w i.val • (b y - ∑ i' : Fin m, a y i' • x' i') =
-∑ i : Fin m, (a w i - A w M * a y i) • x' i + A w M • b y
-\end{lstlisting}
-Since the second sum has only a single term, it simplifies to:
-\begin{lstlisting}
-∑ i ∈ (Finset.univ.filter (fun i : Fin m.succ => i.val < m)).attach, A w i.val • x' ⟨i.val.val, _⟩ +
-A w M • (b y - ∑ i : Fin m, a y i • x' i) =
-∑ i : Fin m, (a w i - A w M * a y i) • x' i + A w M • b y
-\end{lstlisting}
-After simplifying the right-hand side:
-\begin{lstlisting}
-∑ i ∈ (Finset.univ.filter (fun i : Fin m.succ => i.val < m)).attach, A w i.val • x' ⟨i.val.val, _⟩ +
-A w M • (b y - ∑ i : Fin m, a y i • x' i) =
-∑ i : Fin m, (a w i • x' i) - A w M • ∑ i : Fin m, (a y i • x' i) + A w M • b y
-\end{lstlisting}
-We distribute $\bullet$ over \texttt{-} on the left-hand side:
-\begin{lstlisting}
-∑ i ∈ (Finset.univ.filter (fun i : Fin m.succ => i.val < m)).attach, A w i.val • x' ⟨i.val.val, _⟩ +
-A w M • b y - A w M • (∑ i : Fin m, a y i • x' i) =
-∑ i : Fin m, (a w i • x' i) - A w M • ∑ i : Fin m, (a y i • x' i) + A w M • b y
-\end{lstlisting}
-Exploiting \texttt{finishing\_piece} again, it is easy to finish the proof.
-
-
-\end{document}
diff --git a/nonLean/Proof_finFarkasBartl_simplified.tex b/nonLean/Proof_finFarkasBartl_simplified.tex
deleted file mode 100644
index fcf96b3..0000000
--- a/nonLean/Proof_finFarkasBartl_simplified.tex
+++ /dev/null
@@ -1,309 +0,0 @@
-\documentclass[]{article}
-\usepackage[portrait, margin=5mm]{geometry}
-\usepackage{amsmath}
-\usepackage{amssymb}
-\usepackage{amsfonts}
-\usepackage{listings}
-\usepackage{graphicx}
-\pagenumbering{gobble}
-
-\renewcommand{\.}{\hskip .75pt}
-
-\renewcommand{\arraystretch}{1.25}
-
-\newcommand{\fin}[1]{[\.#1\.]}
-
-\DeclareMathOperator{\aand}{\;\wedge\;}
-\DeclareMathOperator{\st}{,\;}
-\DeclareMathOperator{\ex}{\,\exists}
-
-\let\r=\rightarrow
-\let\*=\cdot
-
-\begin{document}
-
-\lstset{
- basicstyle=\ttfamily\small,
- literate=
- {→}{{$\rightarrow$}}1
- {∀}{{$\forall$}}1
- {∃}{{$\exists$}}1
- {×}{{$\times$}}1
- {σ}{{$\sigma$}}1
- {τ}{{$\tau$}}1
- {α}{{$\alpha$}}1
- {γ}{{$\gamma$}}1
- {≠}{{$\neq$}}1
- {≤}{{$\leq$}}1
- {≥}{{$\geq$}}1
- {↔}{{$\leftrightarrow$}}1
- {¬}{{$\neg$}}1
- {∧}{{$\wedge$}}1
- {∨}{{$\vee$}}1
- {•}{$\bullet$}1
- {·}{$\cdot$}1
- {⬝}{$\cdot$}1
- {ℕ}{{$\mathbb{N}$}}1
- {ℤ}{{$\mathbb{Z}$}}1
- {∈}{{$\in$}}1
- {ₗ}{{$_l$}}1
- {₀}{{$_0$}}1
- {∑}{{$\;\sum$}}1
- {ᵀ}{{$^\texttt{T}$}}1
- {ᵥ}{{$_v$}}1
- {ₘ}{{$_m$}}1
- {⁻¹}{{$^{-1}$}}1
- {∞}{{$\infty$}}1
- {⊤}{{$\top$}}1
- {⊥}{{$\bot$}}1
- {⟨}{{$\langle$}}1
- {⟩}{{$\rangle$}}1
- {∘}{{$\circ$}}1
- {▸}{{$\triangleright$}}1
-}
-
-Gyula Farkas established that a system of linear inequalities has a solution if and only if we cannot obtain
-a contradiction by taking a linear combination of the inequalities:
-\begin{lstlisting}
-theorem equalityFarkas [Fintype I] [Fintype J] [LinearOrderedField F] (A : Matrix I J F) (b : I → F) :
- (∃ x : J → F, 0 ≤ x ∧ A *ᵥ x = b) ≠ (∃ y : I → F, 0 ≤ Aᵀ *ᵥ y ∧ b ⬝ᵥ y < 0)
-\end{lstlisting}
-
-The geometric interpretation of \texttt{equalityFarkas} is as follows.
-The column vectors of \texttt{A} generate a cone with apex at the origin
-in the \texttt{|I|}-dimensional Euclidean space.
-The point \texttt{b} either lies inside this cone (in this case, the entries
-of \texttt{x} give nonnegative coefficients which,
-when applied to the column vectors of \texttt{A},
-give a vector from the origin to the point \texttt{b}),
-or there exists a hyperplane that contains the origin and
-strictly separates \texttt{b} from this cone
-(in this case, \texttt{y} gives a normal vector of this hyperplane).
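-
-As a tiny concrete illustration (this example is not part of the Lean development):
-take $I = \{1,2\}$, $J = \{1\}$, $A = \begin{pmatrix} 1 \\ -1 \end{pmatrix}$, and $b = (1,1)$.
-No nonnegative $x$ satisfies $A \* x = b$, since that would require both $x_1 = 1$ and $-x_1 = 1$;
-accordingly, $y = (0,-1)$ is a certificate for the second alternative:
-$A^T\! \* y = 1 \ge 0$ while $b \* y = -1 < 0$.
-Geometrically, the hyperplane through the origin with normal vector $y$
-separates the point $b = (1,1)$ from the ray generated by the single column $(1,-1)$ of $A$.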
-
-The next theorem generalizes \texttt{equalityFarkas} to structures where
-multiplication does not have to be commutative;
-furthermore, it supports infinitely many equations:
-\begin{lstlisting}
-theorem coordinateFarkasBartl {I : Type*} [Fintype J] [LinearOrderedDivisionRing R]
- (A : (I → R) →ₗ[R] J → R) (b : (I → R) →ₗ[R] R) :
- (∃ x : J → R, 0 ≤ x ∧ ∀ w : I → R, ∑ j : J, A w j • x j = b w) ≠ (∃ y : I → R, 0 ≤ A y ∧ b y < 0)
-\end{lstlisting}
-
-In the next generalization, the partially ordered module \texttt{I} $\r$ \texttt{R}
-is replaced by a general \texttt{R}-module \texttt{W}:
-\begin{lstlisting}
-theorem almostFarkasBartl [Fintype J] [LinearOrderedDivisionRing R] [AddCommGroup W] [Module R W]
- (A : W →ₗ[R] J → R) (b : W →ₗ[R] R) :
- (∃ x : J → R, 0 ≤ x ∧ ∀ w : W, ∑ j : J, A w j • x j = b w) ≠ (∃ y : W, 0 ≤ A y ∧ b y < 0)
-\end{lstlisting}
-
-In the most general theorem, stated below, certain occurrences of \texttt{R} are replaced by
-a linearly ordered \texttt{R}-module \texttt{V} whose order respects the order on \texttt{R}:
-\begin{lstlisting}
-theorem fintypeFarkasBartl [Fintype J] [LinearOrderedDivisionRing R]
- [LinearOrderedAddCommGroup V] [Module R V] [PosSMulMono R V] [AddCommGroup W] [Module R W]
- (A : W →ₗ[R] J → R) (b : W →ₗ[R] V) :
- (∃ x : J → V, 0 ≤ x ∧ ∀ w : W, ∑ j : J, A w j • x j = b w) ≠ (∃ y : W, 0 ≤ A y ∧ b y < 0)
-\end{lstlisting}
-
-Note that \texttt{fintypeFarkasBartl} subsumes \texttt{scalarFarkas} as well as the other versions,
-since \texttt{R} can be viewed as a linearly ordered module over itself.
-
-We have hereby stated a three-fold generalization of Farkas' original result.
-Let's prove it! Our proof, starting on the next page, is based on
-a modern algebraic proof by David Bartl. We first prove a tiny-bit-less-general
-version \texttt{finFarkasBartl} which uses \texttt{Fin n} (i.e., indexing by natural numbers
-between \texttt{0} inclusive and \texttt{n} exclusive) instead of
-an arbitrary (unordered) finite type \texttt{J}.
-In the end, we obtain \texttt{fintypeFarkasBartl} from \texttt{finFarkasBartl} using
-some boring mechanisms regarding equivalence between finite types.
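-(Slightly more concretely, but still informally: writing \texttt{n} for the cardinality of \texttt{J}
-and choosing a bijection $e$ between \texttt{J} and \texttt{Fin n},
-we apply \texttt{finFarkasBartl} to the reindexed map that sends \texttt{w} to \texttt{A w} composed with $e^{-1}$,
-which is again linear in \texttt{w}.
-A solution \texttt{x'} of the reindexed problem yields the solution \texttt{x'}~$\circ$~$e$ of the original one,
-because reindexing a finite sum along a bijection does not change its value,
-and the certificate alternative is unaffected by the reindexing.)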
-
-\begin{figure}[ht!]
- \centering
- \includegraphics[width=120mm]{AI-generated.jpg}
- \caption{AI-generated image fills the rest of the first page.}
-\end{figure}
-
-\newpage
-\section*{Proving finFarkasBartl}
-
-In this section, we prove:
-\begin{lstlisting}
-theorem finFarkasBartl {n : ℕ} [LinearOrderedDivisionRing R]
- [LinearOrderedAddCommGroup V] [Module R V] [PosSMulMono R V] [AddCommGroup W] [Module R W]
- (A : W →ₗ[R] Fin n → R) (b : W →ₗ[R] V) :
- (∃ x : Fin n → V, 0 ≤ x ∧ ∀ w : W, ∑ j : Fin n, A w j • x j = b w) ≠ (∃ y : W, 0 ≤ A y ∧ b y < 0)
-\end{lstlisting}
-We first rephrase the goal to:
-\begin{lstlisting}
-(∃ x : Fin n → V, 0 ≤ x ∧ ∀ w : W, ∑ j : Fin n, A w j • x j = b w) ↔ (∀ y : W, 0 ≤ A y → 0 ≤ b y)
-\end{lstlisting}
-Implication from left to right is immediately satisfied by the following term:
-\begin{lstlisting}
-fun ⟨x, hx, hb⟩ y hy => hb y ▸ Finset.sum_nonneg (fun i _ => smul_nonneg (hy i) (hx i))
-\end{lstlisting}
-Implication from right to left will be proved by induction on \texttt{n} with generalized \texttt{A} and \texttt{b}.
-In case \texttt{n = 0} we immediately have:
-\begin{lstlisting}
-A_tauto (w : W) : 0 ≤ A w
-\end{lstlisting}
-We have an assumption:
-\begin{lstlisting}
-hAb : ∀ y : W, 0 ≤ A y → 0 ≤ b y
-\end{lstlisting}
-We set \texttt{x} to be the empty vector family. Now, for every \texttt{w :~W}, we must prove:
-\begin{lstlisting}
-∑ j : Fin 0, A w j • (0 : Fin 0 → V) j = b w
-\end{lstlisting}
-We simplify the goal to:
-\begin{lstlisting}
-0 = b w
-\end{lstlisting}
-We utilize that \texttt{V} is ordered and prove the equality as two inequalities.
-Inequality \texttt{0 $\le$ b w} is directly satisfied by:
-\begin{lstlisting}
-hAb w (A_tauto w)
-\end{lstlisting}
-Inequality \texttt{b w $\le$ 0} is easily reduced to:
-\begin{lstlisting}
-hAb (-w) (A_tauto (-w))
-\end{lstlisting}
-The induction step is stated as a lemma:
-\begin{lstlisting}
-lemma industepFarkasBartl {m : ℕ} [LinearOrderedDivisionRing R]
- [LinearOrderedAddCommGroup V] [Module R V] [PosSMulMono R V] [AddCommGroup W] [Module R W]
- (ih : ∀ A₀ : W →ₗ[R] Fin m → R, ∀ b₀ : W →ₗ[R] V,
- (∀ y₀ : W, 0 ≤ A₀ y₀ → 0 ≤ b₀ y₀) →
- (∃ x₀ : Fin m → V, 0 ≤ x₀ ∧ ∀ w₀ : W, ∑ i₀ : Fin m, A₀ w₀ i₀ • x₀ i₀ = b₀ w₀))
- {A : W →ₗ[R] Fin m.succ → R} {b : W →ₗ[R] V} (hAb : ∀ y : W, 0 ≤ A y → 0 ≤ b y) :
- ∃ x : Fin m.succ → V, 0 ≤ x ∧ ∀ w : W, ∑ i : Fin m.succ, A w i • x i = b w
-\end{lstlisting}
-%First we introduce an auxiliary definition.
-We define
-\begin{lstlisting}
-a : W →ₗ[R] Fin m → R
-\end{lstlisting}
-as the first \texttt{m} rows of \texttt{A} (i.e., \texttt{A} without the last row):
-\begin{lstlisting}
-a := (fun w : W => fun i : Fin m => A w i)
-\end{lstlisting}
-To prove \texttt{industepFarkasBartl} we first consider the easy case:
-\begin{lstlisting}
-is_easy : ∀ y : W, 0 ≤ a y → 0 ≤ b y
-\end{lstlisting}
-From \texttt{ih a b is\_easy} we obtain:
-\begin{lstlisting}
-x : Fin m → V
-hx : 0 ≤ x
-hxb : ∀ w₀ : W, ∑ i₀ : Fin m, a w₀ i₀ • x i₀ = b w₀
-\end{lstlisting}
-The lemma is satisfied by this vector family:
-\begin{lstlisting}
-(fun i : Fin m.succ => if hi : i < m then x i else 0)
-\end{lstlisting}
-Easy case analysis shows that the vector family is nonnegative.
-Now we need to prove:
-\begin{lstlisting}
-∀ w : W, ∑ i : Fin m.succ, A w i • (fun i : Fin m.succ => if hi : i < m then x i else 0) i = b w
-\end{lstlisting}
-We simplify the goal to:
-\begin{lstlisting}
-∀ w : W, ∑ i : Fin m, A w i • x i = b w
-\end{lstlisting}
-This is exactly \texttt{hxb}. \newpage \noindent
-Now for the hard case; negation of \texttt{is\_easy} gives us:
-\begin{lstlisting}
-y' : W
-hay' : 0 ≤ a y'
-hby' : b y' < 0
-\end{lstlisting}
-Let \texttt{y} be flipped and rescaled \texttt{y'} as follows:
-\begin{lstlisting}
-y : W := (A y' m)⁻¹ • y'
-\end{lstlisting}
-From \texttt{hAb} we get:
-\begin{lstlisting}
-hAy' : A y' m < 0
-\end{lstlisting}
-Therefore \texttt{hAy'.ne :~A y' m $\neq$ 0}
-implies that \texttt{y} has the property that motivated the rescaling:
-\begin{lstlisting}
-hAy : A y m = 1
-\end{lstlisting}
-From \texttt{hAy} we have:
-\begin{lstlisting}
-hAA : ∀ w : W, A (w - (A w m • y)) m = 0
-\end{lstlisting}
-Using \texttt{hAA} and \texttt{hAb} we prove:
-\begin{lstlisting}
-hbA : ∀ w : W, 0 ≤ a (w - (A w m • y)) → 0 ≤ b (w - (A w m • y))
-\end{lstlisting}
-From \texttt{hbA} we have:
-\begin{lstlisting}
-hbAb : ∀ w : W, 0 ≤ (a - (A · m • a y)) w → 0 ≤ (b - (A · m • b y)) w
-\end{lstlisting}
-We observe that these two terms (appearing in \texttt{hbAb} we just proved) are linear maps:
-\begin{lstlisting}
-(a - (A · m • a y))
-(b - (A · m • b y))
-\end{lstlisting}
-Therefore, we can plug them into \texttt{ih} and provide \texttt{hbAb} as the last argument.
-We obtain:
-\begin{lstlisting}
-x' : Fin m → V
-hx' : 0 ≤ x'
-hxb' : ∀ w₀ : W, ∑ i₀ : Fin m, (a - (A · m • a y)) w₀ i₀ • x' i₀ = (b - (A · m • b y)) w₀
-\end{lstlisting}
-We claim that our lemma is satisfied by this vector family:
-\begin{lstlisting}
-(fun i : Fin m.succ => if hi : i < m then x' i else b y - ∑ j : Fin m, a y j • x' j)
-\end{lstlisting}
-Let us show the nonnegativity first.
-Nonnegativity of everything except for the last vector follows from \texttt{hx'}.
-From \texttt{hAy'} we have:
-\begin{lstlisting}
-hAy'' : (A y' m)⁻¹ ≤ 0
-\end{lstlisting}
-From \texttt{hAy''} with \texttt{hay'} we have:
-\begin{lstlisting}
-hay : a y ≤ 0
-\end{lstlisting}
-From \texttt{hAy''} with \texttt{hby'} converted to nonstrict inequality we have:
-\begin{lstlisting}
-hby : 0 ≤ b y
-\end{lstlisting}
-For the nonnegativity of the last vector, we need to prove:
-\begin{lstlisting}
-∑ j : Fin m, a y j • x' j ≤ b y
-\end{lstlisting}
-It follows from \texttt{hay j} with \texttt{hx' j} and \texttt{hby} using basic properties of inequalities.
-The only remaining task is to show:
-\begin{lstlisting}
-∀ w : W,
- ∑ i : Fin m.succ, (A w i • (if hi : i < m then x' i else b y - ∑ j : Fin m, a y j • x' j)) = b w
-\end{lstlisting}
-Given general \texttt{w :~W} we make a key observation (using \texttt{hxb' w}):
-\begin{lstlisting}
-haAa : ∑ i : Fin m, (a w i - A w m * a y i) • x' i = b w - A w m • b y
-\end{lstlisting}
-With the help of \texttt{haAa} we transform the goal to:
-\begin{lstlisting}
-∑ i : Fin m.succ, (A w i • (if hi : i < m then x' i else b y - ∑ j : Fin m, a y j • x' j)) =
-∑ i : Fin m, (a w i - A w m * a y i) • x' i + A w m • b y
-\end{lstlisting}
-%From here, the direction should be clear; the rest of the proof is just a manipulation
-%with the goal without any additional hypotheses.
-We distribute $\bullet$ over \texttt{if} so that the goal becomes:
-\begin{lstlisting}
-∑ i : Fin m.succ, (if hi : i < m then A w i • x' i else A w i • (b y - ∑ j : Fin m, a y j • x' j)) =
-∑ i : Fin m, (a w i - A w m * a y i) • x' i + A w m • b y
-\end{lstlisting}
-We split the left-hand side into two parts:
-\begin{lstlisting}
-∑ i : Fin m, (a w i • x' i) + A w m • (b y - ∑ j : Fin m, a y j • x' j) =
-∑ i : Fin m, (a w i - A w m * a y i) • x' i + A w m • b y
-\end{lstlisting}
-The rest is a simple manipulation with sums.
-
-
-\end{document}
diff --git a/nonLean/algebraic_hierarchy.dot b/nonLean/algebraic_hierarchy.dot
deleted file mode 100644
index 045ed58..0000000
--- a/nonLean/algebraic_hierarchy.dot
+++ /dev/null
@@ -1,89 +0,0 @@
-digraph G {
-
- Add -> AddSemigroup;
- Mul -> Semigroup;
- Zero -> AddZeroClass;
- Add -> AddZeroClass;
- AddSemigroup -> AddMonoid;
- AddZeroClass -> AddMonoid;
- One -> MulOneClass;
- Mul -> MulOneClass;
- Semigroup -> Monoid;
- MulOneClass -> Monoid;
- AddMonoid -> SubNegMonoid;
- Neg -> SubNegMonoid;
- Sub -> SubNegMonoid;
- Monoid -> DivInvMonoid;
- Inv -> DivInvMonoid;
- Div -> DivInvMonoid;
- SubNegMonoid -> AddGroup;
- Add -> AddCommMagma;
- Mul -> CommMagma;
- AddSemigroup -> AddCommSemigroup;
- AddCommMagma -> AddCommSemigroup;
- Semigroup -> CommSemigroup;
- CommMagma -> CommSemigroup;
- AddGroup -> AddCommGroup;
- AddCommMonoid -> AddCommGroup;
- AddMonoid -> AddCommMonoid;
- AddCommSemigroup -> AddCommMonoid;
- Monoid -> CommMonoid;
- CommSemigroup -> CommMonoid;
- Mul -> Distrib;
- Add -> Distrib;
- Mul -> MulZeroClass;
- Zero -> MulZeroClass;
- AddCommMonoid -> NonUnitalNonAssocSemiring;
- Distrib -> NonUnitalNonAssocSemiring;
- MulZeroClass -> NonUnitalNonAssocSemiring;
- Semigroup -> SemigroupWithZero;
- MulZeroClass -> SemigroupWithZero;
- NonUnitalNonAssocSemiring -> NonUnitalSemiring;
- SemigroupWithZero -> NonUnitalSemiring;
- AddMonoid -> AddMonoidWithOne;
- AddMonoidWithOne -> AddGroupWithOne;
- AddGroup -> AddGroupWithOne;
- One -> AddMonoidWithOne;
- AddMonoidWithOne -> AddCommMonoidWithOne;
- AddCommMonoid -> AddCommMonoidWithOne;
- MulOneClass -> MulZeroOneClass;
- MulZeroClass -> MulZeroOneClass;
- NonUnitalNonAssocSemiring -> NonAssocSemiring;
- MulZeroOneClass -> NonAssocSemiring;
- AddCommMonoidWithOne -> NonAssocSemiring;
- Monoid -> MonoidWithZero;
- MulZeroOneClass -> MonoidWithZero;
- SemigroupWithZero -> MonoidWithZero;
- NonUnitalSemiring -> Semiring;
- NonAssocSemiring -> Semiring;
- MonoidWithZero -> Semiring;
- Semiring -> Ring;
- AddCommGroup -> Ring;
- AddGroupWithOne -> Ring;
- Ring -> CommRing;
- CommMonoid -> CommRing;
- Ring -> DivisionRing;
- DivInvMonoid -> DivisionRing;
- Nontrivial -> DivisionRing;
- CommRing -> Field;
- DivisionRing -> Field;
-
- LE -> Preorder;
- LT -> Preorder;
- Preorder -> PartialOrder;
- PartialOrder -> LinearOrder;
- AddCommGroup -> OrderedAddCommGroup;
- PartialOrder -> OrderedAddCommGroup;
- Ring -> StrictOrderedRing;
- OrderedAddCommGroup -> StrictOrderedRing;
- Nontrivial -> StrictOrderedRing;
- StrictOrderedRing -> LinearOrderedRing;
- LinearOrder -> LinearOrderedRing;
- LinearOrderedRing -> LinearOrderedCommRing;
- CommMonoid -> LinearOrderedCommRing;
- LinearOrderedCommRing -> LinearOrderedField;
- Field -> LinearOrderedField;
- OrderedAddCommGroup -> LinearOrderedAddCommGroup;
- LinearOrder -> LinearOrderedAddCommGroup;
-
-}
\ No newline at end of file
diff --git a/nonLean/algebraic_hierarchy.png b/nonLean/algebraic_hierarchy.png
deleted file mode 100644
index d3639d9..0000000
Binary files a/nonLean/algebraic_hierarchy.png and /dev/null differ
diff --git a/nonLean/algebraic_hierarchy.svg b/nonLean/algebraic_hierarchy.svg
deleted file mode 100644
index 15b75f7..0000000
--- a/nonLean/algebraic_hierarchy.svg
+++ /dev/null
@@ -1,823 +0,0 @@
-
\ No newline at end of file
diff --git a/nonLean/duality.bib b/nonLean/duality.bib
deleted file mode 100644
index 58a39ce..0000000
--- a/nonLean/duality.bib
+++ /dev/null
@@ -1,176 +0,0 @@
-
-@article{Farkas-AFP,
- author = {Ralph Bottesch and Max W. Haslbeck and René Thiemann},
- title = {Farkas' Lemma and {M}otzkin's Transposition Theorem},
- journal = {Archive of Formal Proofs},
- month = {January},
- year = {2019},
- note = {\url{https://isa-afp.org/entries/Farkas.html},
- Formal proof development},
- ISSN = {2150-914x},
-}
-
-@article{Linear-AFP,
- author = {Ralph Bottesch and Alban Reynaud and René Thiemann},
- title = {Linear Inequalities},
- journal = {Archive of Formal Proofs},
- month = {June},
- year = {2019},
- note = {\url{https://isa-afp.org/entries/Linear\_Inequalities.html},
- Formal proof development},
- ISSN = {2150-914x},
-}
-
-@article{Duality-AFP,
- author = {René Thiemann},
- title = {Duality of Linear Programming},
- journal = {Archive of Formal Proofs},
- month = {February},
- year = {2022},
- note = {\url{https://isa-afp.org/entries/LP\_Duality.html},
- Formal proof development},
- ISSN = {2150-914x},
-}
-
-@misc{Simplex-Coq,
- title={A formalization of convex polyhedra based on the simplex method},
- author={Xavier Allamigeon and Ricardo D. Katz},
- year={2018},
- eprint={1706.10269},
- archivePrefix={arXiv},
- primaryClass={cs.LO},
- url={https://arxiv.org/abs/1706.10269},
-}
-
-@misc{DivBy0,
- title = {Division by zero in type theory: a {FAQ}},
- howpublished = {\url{https://xenaproject.wordpress.com/2020/07/05/division-by-zero-in-type-theory-a-faq/}},
- note = {Accessed: 2024-08-28}
-}
-
-@article{Believe,
- title = {How to Believe a Machine-Checked Proof},
- volume = {4},
- ISSN = {0909-0878},
- url = {http://dx.doi.org/10.7146/brics.v4i18.18945},
- DOI = {10.7146/brics.v4i18.18945},
- number = {18},
- journal = {BRICS Report Series},
- publisher = {Det Kgl. Bibliotek/Royal Danish Library},
- author = {Pollack, Robert},
- year = {1997},
- month = jan
-}
-
-@inproceedings{Aesop,
- series = {CPP ’23},
- title = {Aesop: White-Box Best-First Proof Search for {Lean}},
- url = {http://dx.doi.org/10.1145/3573105.3575671},
- DOI = {10.1145/3573105.3575671},
- booktitle = {Proceedings of the 12th ACM SIGPLAN International Conference on Certified Programs and Proofs},
- publisher = {ACM},
- author = {Limperg, Jannis and From, Asta Halkjær},
- year = {2023},
- month = jan,
- collection = {CPP ’23}
-}
-
-
-
-@article{Farkas1894,
- title={A {F}ourier-f{\'e}le mechanikai elv alkalmaz{\'a}sai},
- author={Farkas, Gyula},
- journal={Mathematikai {\'e}s Term{\'e}szettudom{\'a}nyi {\'E}rtesit{\"o}},
- volume={12},
- pages={457--472},
- year={1894}
-}
-
-@article{Farkas1898,
- title={Param{\'e}teres m{\'o}dszer Fourier mechanikai elv{\'e}hez},
- author={Farkas, Gyula},
- journal={Mathematikai {\'e}s Physikai Lapok},
- volume={7},
- pages={63--71},
- year={1898}
-}
-
-@article{Chernikov,
- title={Linear Inequalities},
- author={Chernikov, Sergei Nikolaevich},
- journal={Nauka},
- year={1968}
-}
-
-@article{Bartl2007,
- title = {Farkas’ Lemma, other theorems of the alternative, and linear programming in infinite-dimensional spaces: a purely linear-algebraic approach},
- volume = {55},
- ISSN = {1563-5139},
- url = {http://dx.doi.org/10.1080/03081080600967820},
- DOI = {10.1080/03081080600967820},
- number = {4},
- journal = {Linear and Multilinear Algebra},
- publisher = {Informa UK Limited},
- author = {Bartl, David},
- year = {2007},
- month = jul,
- pages = {327–353}
-}
-
-@article{Bartl2011,
- title = {A very short algebraic proof of the {F}arkas Lemma},
- volume = {75},
- ISSN = {1432-5217},
- url = {http://dx.doi.org/10.1007/s00186-011-0377-y},
- DOI = {10.1007/s00186-011-0377-y},
- number = {1},
- journal = {Mathematical Methods of Operations Research},
- publisher = {Springer Science and Business Media LLC},
- author = {Bartl, David},
- year = {2011},
- month = dec,
- pages = {101–104}
-}
-
-@article{Perng2017,
- title = {On a Class of Theorems Equivalent to {F}arkas's Lemma},
- author = {Cherng-tiao Perng},
- volume = {11},
- number = {44},
- journal = {Applied Mathematical Sciences},
- year = 2017,
- pages = {2175--2184}
-}
-
-@book{Minkowski,
- author = {Minkowski, H.},
- title = {Geometrie der Zahlen (Erste Lieferung)},
- publisher = {Teubner, Leipzig},
- year = 1896
-}
-
-@unpublished{Discussion,
- author = "J. von Neumann and G. Dantzig",
- title = "Discussion of a maximum problem",
- note = "Institute for Advanced Study, Princeton, NJ",
- year = "1947"
-}
-
-@inproceedings{GaleKuhnTucker,
-author = {D. Gale and H.W. Kuhn and A.W. Tucker},
-title = {Linear Programming and the theory of games},
-year = {1951},
-pages = {317--329},
-booktitle = {Activity Analysis of Production and Allocation}
-}
-
-@Article{BLP,
- author = "Vladimir Kolmogorov and Johan Thapper and Stanislav {\v{Z}ivn\'y}",
- title = "The power of linear programming for general-valued {CSP}s",
- year = "2015",
- volume = 44,
- number = 1,
- pages = {1--36},
- journal = "SIAM Journal on Computing"
-}
-
diff --git a/nonLean/duality.pdf b/nonLean/duality.pdf
deleted file mode 100644
index d42b86b..0000000
Binary files a/nonLean/duality.pdf and /dev/null differ
diff --git a/nonLean/duality.tex b/nonLean/duality.tex
deleted file mode 100644
index 4f8f5c8..0000000
--- a/nonLean/duality.tex
+++ /dev/null
@@ -1,1794 +0,0 @@
-\documentclass[]{article}
-\usepackage[portrait, margin=5mm]{geometry}
-\usepackage{amsmath,amsthm}
-\usepackage{amssymb}
-\usepackage{amsfonts}
-\usepackage{listings}
-\usepackage{graphicx}
-\usepackage{array}
-
-\usepackage{hyperref}
-\usepackage{xcolor}
-
-\pagenumbering{gobble}
-
-
-\renewcommand{\.}{\hskip .75pt}
-
-\renewcommand{\arraystretch}{1.25}
-
-\newcommand{\fin}[1]{[\.#1\.]}
-
-\newcommand{\sekt}[1]{Section~\ref{#1}}
-
-\DeclareMathOperator{\aand}{\;\wedge\;}
-\DeclareMathOperator{\st}{,\;}
-\DeclareMathOperator{\ex}{\,\exists}
-
-\let\r=\rightarrow
-\let\*=\cdot
-
-\makeatletter
-\newcommand{\thickhline}{%
- \noalign {\ifnum 0=`}\fi \hrule height 1.5pt
- \futurelet \reserved@a \@xhline
-}
-\newcolumntype{"}{@{\hskip\tabcolsep\vrule width 1.5pt\hskip\tabcolsep}}
-\makeatother
-
-
-\makeatletter
-\newcommand{\customlabel}[2]{%
- \protected@write \@auxout {}{\string \newlabel {#1}{{#2}{\thepage}{#2}{#1}{}} }%
- \hypertarget{#1}{#2\!\!}
-}
-\makeatother
-
-\makeatletter
-\newcommand{\customlabelrestated}[2]{%
- \protected@write \@auxout {}{\string \newlabel {#1}{{#2}{\thepage}{#2}{#1}{}} }%
- \hypertarget{#1}{#2, restated\!\!}
-}
-\makeatother
-
-\newtheorem*{theorem}{Theorem}
-\newtheorem*{lemma}{Lemma}
-\newtheorem*{definition}{Definition}
-
-
-
-\begin{document}
-
-\lstset{
- basicstyle=\ttfamily\small,
- literate=
- {→}{{$\rightarrow$}}1
- {∀}{{$\forall$}}1
- {∃}{{$\exists$}}1
- {×}{{$\times$}}1
- {σ}{{$\sigma$}}1
- {τ}{{$\tau$}}1
- {α}{{$\alpha$}}1
- {β}{{$\beta$}}1
- {γ}{{$\gamma$}}1
- {≠}{{$\neq$}}1
- {≤}{{$\leq$}}1
- {≥}{{$\geq$}}1
- {↔}{{$\iff$}}1
- {¬}{{$\neg$}}1
- {∧}{{$\wedge$}}1
- {∨}{{$\vee$}}1
- {•}{$\bullet$}1
- {·}{$\cdot$}1
- {⬝}{$\cdot$}1
- {ℕ}{{$\mathbb{N}$}}1
- {ℤ}{{$\mathbb{Z}$}}1
- {ₗ}{{$_l$}}1
- {₀}{{$_0$}}1
- {₁}{{$_1$}}1
- {₂}{{$_2$}}1
- {ᵥ}{{$_v$}}1
- {ₘ}{{$_m$}}1
- {⁻¹}{{$^{-1}$}}1
- {∞}{{$\infty$}}1
- {∑}{{$\sum$}}1
- {ᵀ}{{$^\texttt{T}$}}1
- {⊤}{{$\top$}}1
- {⊥}{{$\bot$}}1
- {⟨}{{$\langle$}}1
- {⟩}{{$\rangle$}}1
- {∘}{{$\circ$}}1
-}
-
-
-\title{Duality theory in linear optimization and its extensions\:---\:formally verified}
-
-\author{Martin Dvorak \hspace{30pt} Vladimir Kolmogorov \\ \normalsize Institute of Science and Technology Austria \\ {\normalsize\tt $\{$martin.dvorak,vnk$\}$@ista.ac.at}}
-\date{}
-
-%\author{Martin Dvorak, Vladimir Kolmogorov}
-%\date{2024}
-\maketitle
-
-
-\noindent \textbf{Abstract:}\;
-%\begin{abstract}
-Farkas established that a system of linear inequalities has a solution if and only if we cannot obtain
-a contradiction by taking a linear combination of the inequalities.
-We state and formally prove several Farkas-like theorems over linearly ordered fields in Lean 4.
-Furthermore, we extend duality theory to the case when some coefficients are allowed to take
-``infinite values''.
-%Furthermore, we consider a linearly ordered field extended with two special elements denoted by $\bot$ and $\top$
-%where $\bot$ is below every element and $\top$ is above every element.
-%We define $\bot + a = \bot = a + \bot$ for all $a$ and we define $\top + b = \top = b + \top$ for all $b \neq \bot$.
-%Instead of multiplication, we define scalar action $c \cdot \bot = \bot$ for every $c \ge 0$ but we define
-%$d \cdot \top = \top$ only for $d > 0$ because $0 \cdot \top = 0$.
-%We extend certain Farkas-like theorems to a setting where coefficients are from an extended linearly ordered field.
-%\end{abstract}
-\medskip
-
-
-\noindent \textbf{Keywords:}\;
-Farkas lemma, linear programming, extended rationals, extended reals, calculus of inductive constructions
-
-
-\section{Introduction}
-\label{introduction}
-
-When studying linear programming, we often ask whether a given
-system of linear inequalities has a solution.
-Duality theorems answer these naturally arising questions as follows:
-A system of linear inequalities has a solution if and only if
-we cannot obtain a contradiction by taking a linear combination of
-the inequalities. When we formulate duality theorems as
-``either there exists a solution, or there exists a vector of coefficients
-that tells us how to derive $0 < 0$ from given inequalities'',
-they are usually called {\em theorems of alternatives}.
-Two well-known theorems of alternatives are as follows
-(Farkas~\cite{Farkas1894,Farkas1898}; Minkowski~\cite{Minkowski}).
-
-\begin{theorem}[\customlabel{equalityFarkas}{\texttt{equalityFarkas}}]
-Let $I$ and $J$ be finite types.
-Let $F$ be a linearly ordered field.
-Let $A$ be a matrix of type $(I \times J) \r F$.
-Let $b$ be a vector of type $I \r F$.
-Exactly one of the following exists:
-\begin{itemize}
-\item nonnegative vector $x : J \r F$ such that $A \* x = b$
-\item vector $y : I \r F$ such that $A^T\! \* y \ge 0$ and $b \* y < 0$
-\end{itemize}
-\end{theorem}
-
-\begin{theorem}[\customlabel{inequalityFarkas}{\texttt{inequalityFarkas}}]
-Let $I$ and $J$ be finite types.
-Let $F$ be a linearly ordered field.
-Let $A$ be a matrix of type $(I \times J) \r F$.
-Let $b$ be a vector of type $I \r F$.
-Exactly one of the following exists:
-\begin{itemize}
-\item nonnegative vector $x : J \r F$ such that $A \* x \le b$
-\item nonnegative vector $y : I \r F$ such that $A^T\! \* y \ge 0$ and $b \* y < 0$
-\end{itemize}
-\end{theorem}
-
-For optimization problems, duality asserts a correspondence between
-the optimal value of a linear program and the optimal value of its dual
-(originally studied in the context of zero-sum games by Dantzig, von Neumann~\cite{Discussion};
-later in Gale, Kuhn, Tucker~\cite{GaleKuhnTucker}).
-The strong duality theorem, the cornerstone of linear programming, can be stated as follows.
-% maybe "which is a cornerstone of linear programming"
-
-\begin{theorem}[\customlabel{StandardLP.strongDuality}{\texttt{StandardLP.strongDuality}}]
-Let $I$ and $J$ be finite types.
-Let $F$ be a linearly ordered field.
-Let $A$ be a matrix of type $(I \times J) \r F$.
-Let $b$ be a vector of type $I \r F$.
-Let $c$ be a vector of type $J \r F$.
-Then
-\begin{equation}
-\min \,\{\, c \* x ~|~ x \ge 0 \aand A \* x \le b \,\}
-=- \min \,\{\, b \* y ~|~ y \ge 0 \aand (-A^T) \* y \le c \,\}
-\label{eq:StandardLP.strongDuality:minmin}
-\end{equation}
-holds if at least one of the systems has a solution (very roughly paraphrased).
-\end{theorem}
-Using identity $\min\{c\*x~|~\ldots\}=-\max\{-c\*x~|~\ldots\}$ and replacing $c$ with $-c$,
-eq.~\eqref{eq:StandardLP.strongDuality:minmin} can be equivalently transformed to
-\begin{equation}
-\max \,\{\, c \* x ~|~ x \ge 0 \aand A \* x \le b \,\}
-= \min \,\{\, b \* y ~|~ y \ge 0 \aand A^T \!\* y \ge c \,\}
-\label{eq:StandardLP.strongDuality:maxmin}
-\end{equation}
-which is probably a more familiar formulation of strong LP duality.
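-In detail (a routine check which we spell out only for convenience): substituting $-c$ for $c$ in
-eq.~\eqref{eq:StandardLP.strongDuality:minmin} gives
-$$ \min \,\{\, -c \* x ~|~ x \ge 0 \aand A \* x \le b \,\}
-= - \min \,\{\, b \* y ~|~ y \ge 0 \aand (-A^T) \* y \le -c \,\} ; $$
-rewriting the left-hand side as $-\max \,\{\, c \* x ~|~ x \ge 0 \aand A \* x \le b \,\}$,
-rewriting the right-hand constraint as $A^T \!\* y \ge c$, and negating both sides
-then yields eq.~\eqref{eq:StandardLP.strongDuality:maxmin}.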
-Later it will be clear why we chose the ``$\min\,$/$\,\min$''
-rather than the more idiomatic ``$\max\,$/$\,\min$'' formulation. Also, we do not investigate
-``asymmetric versions'' such as:
-$$ \max \,\{\, c \* x ~|~ x \ge 0 \aand A \* x = b \,\}
-= \min \,\{\, b \* y ~|~ A^T \!\* y \ge c \,\}
-$$
-
-For many more Farkas-like theorems (beyond the scope of this paper),
-see for example \cite{Perng2017}.
-
-This paper makes the following contributions.
-\begin{itemize}
-\item We formally prove several existing duality results (including the three theorems above) in Lean 4.
-In fact, we prove a more general version of \ref{equalityFarkas} due to Bartl~\cite{Bartl2007}.
-%This version, in particular, allows structures with a non-commutative multiplication, and also allows infinitely many equations.
-\sekt{generalizations} says more about it.
-\item We establish (and formally prove in Lean 4) a new generalization
-of~\ref{inequalityFarkas} and~\ref{StandardLP.strongDuality}
-to the case when some of the coefficients are allowed to have infinite values.
-This scenario can be motivated by discrete optimization problems with ``hard'' constraints
-(the ``hard'' constraints declare what has to be satisfied,
-whereas ``soft'' constraints declare what should be optimized).
-A common way to concisely write down such problems mathematically is to use
-infinite coefficients in front of the corresponding terms in the objective.
-Since infinities are not allowed in traditional LPs,
-``soft'' and ``hard'' constraints need to be handled differently
-when formulating LP relaxations of such problems (see e.g.~\cite{BLP}).
-Our work provides a more direct way to formulate such relaxations (a small informal example follows this list).
-\sekt{extensions} says more about the extensions.
-\end{itemize}
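-
-Informally (and anticipating the arithmetic on $F_\infty$ introduced in \sekt{extensions}),
-an objective such as
-$$ \min \,\{\, 3 x_1 + 2 x_2 + \top \cdot x_3 ~|~ x \ge 0 \aand A \* x \le b \,\} $$
-encodes the ``hard'' constraint $x_3 = 0$:
-any solution with $x_3 > 0$ makes the last term, and hence the whole objective, equal to $\top$,
-whereas $x_3 = 0$ makes the last term equal to $0$.
-This toy example is only illustrative; the precise treatment is given in \sekt{extensions}.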
-
-\subsection{Bartl's generalization}\label{generalizations}
-
-The next theorem generalizes \ref{equalityFarkas} to structures where
-multiplication does not have to be commutative.
-Furthermore, it supports infinitely many equations.
-
-\begin{theorem}[\customlabel{coordinateFarkas}{\texttt{coordinateFarkasBartl}}]
-Let $I$ be any type.
-Let $J$ be a finite type.
-Let $R$ be a linearly ordered division ring.
-Let $A$ be an $R$-linear map from $(I \r R)$ to $(J \r R)$.
-Let $b$ be an $R$-linear map from $(I \r R)$ to $R$.
-Exactly one of the following exists:
-\begin{itemize}
-\item nonnegative vector $x : J \r R$ such that, for all $w : I \r R$, we have
-$ \sum_{j : J}\; (A~w)_j \cdot x_j = b~w $
-\item vector $y : I \r R$ such that $A~y \ge 0$ and $b~y < 0$
-\end{itemize}
-\end{theorem}
-
-Note that \ref{equalityFarkas} for matrix $A: I \times J \r F$ and vector $b : I \r F$
-can be obtained by applying \ref{coordinateFarkas} to the $F$-linear maps
-$(A^T \* \phantom{v})$ and $(b \* \phantom{v})$ utilizing the fact that
-two linear maps are equal if and only if they map the basis vectors equally.
-
-In the next generalization (similar to \cite{Chernikov}),
-the partially ordered module $I \r R$ is replaced by a general $R$-module $W$.
-
-\begin{theorem}[\customlabel{scalarFarkas}{\texttt{almostFarkasBartl}}]
-Let $J$ be a finite type.
-Let $R$ be a linearly ordered division ring.
-Let $W$ be an $R$-module.
-Let $A$ be an $R$-linear map from $W$ to $(J \r R)$.
-Let $b$ be an $R$-linear map from $W$ to $R$.
-Exactly one of the following exists:
-\begin{itemize}
-\item nonnegative vector $x : J \r R$ such that, for all $w : W$, we have
-$ \sum_{j : J}\; (A~w)_j \cdot x_j = b~w $
-\item vector $y : W$ such that $A~y \ge 0$ and $b~y < 0$
-\end{itemize}
-\end{theorem}
-In the most general theorem, stated below, certain occurrences of $R$ are replaced by
-a linearly ordered $R$-module $V$ whose order respects the order on $R$
-(for a formal definition, see the end of \sekt{modules}).
-
-\begin{theorem}[\customlabel{fintypeFarkasBartl}{\texttt{fintypeFarkasBartl}}]
-Let $J$ be a finite type.
-Let $R$ be a linearly ordered division ring.
-Let $W$ be an $R$-module.
-Let $V$ be a linearly ordered $R$-module whose ordering satisfies
-monotonicity of scalar multiplication by nonnegative elements on the left.
-Let $A$ be an $R$-linear map from $W$ to $(J \r R)$.
-Let $b$ be an $R$-linear map from $W$ to $V$.
-Exactly one of the following exists:
-\begin{itemize}
-\item nonnegative vector family $x : J \r V$ such that, for all $w : W$, we have
-$ \sum_{j : J}\; (A~w)_j \cdot x_j = b~w $
-\item vector $y : W$ such that $A~y \ge 0$ and $b~y < 0$
-\end{itemize}
-\end{theorem}
-In the last branch, $A~y \ge 0$ uses the partial order on $(J \r R)$ whereas
-$b~y < 0$ uses the linear order on $V$.
-Note that \ref{fintypeFarkasBartl} subsumes \ref{scalarFarkas} (as well as the other versions based on equality),
-since $R$ can be viewed as a linearly ordered module over itself.
-We prove \ref{fintypeFarkasBartl} in \sekt{bartl}, which is where the heavy lifting happens.
-Our proof is based on \cite{Bartl2011}.
-
-
-\subsection{Extension to infinite coefficients}\label{extensions}
-
-Until now, we have talked about known results.
-What follows is a new extension of the theory.
-
-\begin{definition}
-Let $F$ be a linearly ordered field.
-We define an \emph{extended} linearly ordered field $F_\infty$ as
-$F \cup \{ \bot, \top \}$ with the following properties.
-Let $p$ and $q$ be numbers from $F$.
-We have $\bot < p < \top$.
-We define addition, negation, and scalar action on $F_\infty$ as follows:
-\begin{center}
- \begin{tabular}{ | c " c | c | c | }
- \hline
- $+$ & $\bot$ & $q$ & $\top$ \\
- \thickhline
- $\bot$ & $\bot$ & $\bot$ & $\bot$ \\
- \hline
- $p$ & $\bot$ & $p\!+\!q$ & $\top$ \\
- \hline
- $\top$ & $\bot$ & $\top$ & $\top$ \\
- \hline
- \end{tabular}
- \qquad\qquad\qquad
- \begin{tabular}{ | c " c | c | c | }
- \hline
- $-$ & $\bot$ & $q$ & $\top$ \\
- \thickhline
- $=$ & $\top$ & $-q$ & $\bot$ \\
- \hline
- \end{tabular}
- \qquad\qquad\qquad
- \begin{tabular}{ | c " c | c | c | }
- \hline
- $\cdot$ & $\bot$ & $q$ & $\top$ \\
- \thickhline
- $0$ & $\bot$ & $0$ & $0$ \\
- \hline
- $p>0$ & $\bot$ & $p \cdot q$ & $\top$ \\
- \hline
- \end{tabular}
-\end{center}
-When we talk about elements of $F_\infty$,
-we say that values from $F$ are \emph{finite}.
-\end{definition}
-
-Informally speaking, $\top$ represents the positive infinity,
-$\bot$ represents the negative infinity, and we say that
-$\bot$ is ``stronger'' than $\top$ in~all arithmetic operations.
-The surprising parts are $\bot + \top = \bot$ and $0 \cdot \bot = \bot$.
-Because of them, $F_\infty$ is not a field.
-In fact, $F_\infty$ is not even a group.
-However, $F_\infty$ is still a densely linearly ordered abelian monoid
-with characteristic zero.
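-
-As a quick sanity check using only the addition table above:
-if $\bot$ had an additive inverse $z$, then
-$$ \top \;=\; \top + (\bot + z) \;=\; (\top + \bot) + z \;=\; \bot + z \;=\; 0 , $$
-a contradiction; hence $\bot$ has no additive inverse and $F_\infty$ cannot be a group.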
-
-\begin{theorem}[\customlabel{extendedFarkas}{\texttt{extendedFarkas}}]
-Let $I$ and $J$ be finite types.
-Let $F$ be a linearly ordered field.
-Let $A$ be a matrix of type $(I \times J) \r F_\infty$.
-Let $b$ be a vector of type $I \r F_\infty$.
-Assume that $A$ does not have $\bot$ and $\top$ in the same row.
-Assume that $A$ does not have $\bot$ and $\top$ in the same column.
-Assume that $A$ does not have $\top$ in any row where $b$ has $\top$.
-Assume that $A$ does not have $\bot$ in any row where $b$ has~$\bot$.
-Exactly one of the following exists:
-\begin{itemize}
-\item nonnegative vector $x : J \r F$ such that $A \* x \le b$
-\item nonnegative vector $y : I \r F$ such that $(-A^T) \* y \le 0$ and $b \* y < 0$
-\end{itemize}
-\end{theorem}
-Note that \ref{extendedFarkas} has four preconditions on matrix $A$ and vector $b$.
-In \sekt{sec:extendedFarkas:counterexamples}
-we show that omitting any of them makes the theorem false.
-Observe that \ref{inequalityFarkas} has condition $A^T \* y \ge 0$
-in the second branch, while in \ref{extendedFarkas}
-we changed it to $(-A^T) \* y \le 0$.
-The two conditions are equivalent
-for finite-valued matrices $A$,
-but not necessarily for matrices $A$ with infinities
-(e.g.\ condition
-$(-\top) \* 0 \ge 0$ is false but
-$\top \* 0 \le 0$ is true).
-One must be careful when formulating this condition;
-for example, using the condition from \ref{inequalityFarkas}
-would make the theorem false even if $A$ has a single $\bot$ entry
-(see \sekt{sec:extendedFarkas:counterexamples}).
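-
-Spelled out using the tables from the definition above
-(recall that the weight acts on the entry from the left, see \sekt{preliminaries}):
-$$ (-\top) \* 0 \;=\; 0 \cdot (-\top) \;=\; 0 \cdot \bot \;=\; \bot \;\not\ge\; 0 ,
-\qquad\qquad
-\top \* 0 \;=\; 0 \cdot \top \;=\; 0 \;\le\; 0 . $$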
-
-Next, we define an extended notion of linear program, i.e.,
-linear programming over extended linearly ordered fields.
-The implicit intention is that the linear program is to be minimized.
-
-\begin{definition}
-Let $I$ and $J$ be finite types.
-Let $F$ be a linearly ordered field.
-Let $A$ be a matrix of type $(I \times J) \r F_\infty$,
-let $b$ be a vector of type $I \r F_\infty$,
-and $c$ be a vector of type $J \r F_\infty$.
-We say that $P = (A, b, c)$ is a \emph{linear program} over $F_\infty$
-whose constraints are indexed by $I$ and variables are indexed by $J$.
-
-A nonnegative vector $x : J \r F$ is a \emph{solution} to $P$ if $A \* x \le b$.
-We say that $P$ is \emph{feasible} if there exists a solution $x$ with $c \* x \ne \top$.
-We say that $P$ is \emph{unbounded} if, for every $r\in F$, there exists a solution $x$
-with $c \* x \le r$.
-
-The \emph{optimum} of $P$, denoted as $P^\star$,
-is defined as follows. If $P$ is not feasible then $P^\star=\top$.
-Else, if $P$ is unbounded, then $P^\star=\bot$.
-Else, if there exists a finite $r\in F$ such that $c \* x \ge r$ for all solutions $x$
-and $c \* x^\star = r$ for some solution $x^\star$, then $P^\star=r$.
-Otherwise, $P^\star$ is undefined.
-\end{definition}
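-
-To illustrate the definition with a single constraint and a single variable (all data finite):
-the program $\bigl((1),(1),(1)\bigr)$ has solutions $0 \le x_1 \le 1$ and attains $P^\star = 0$ at $x^\star = 0$;
-the program $\bigl((-1),(0),(-1)\bigr)$ is feasible but unbounded, so $P^\star = \bot$;
-and the program $\bigl((1),(-1),(1)\bigr)$ has no nonnegative solution, so $P^\star = \top$.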
-
-In order to state our duality results, we need a few more definitions.
-
-\begin{definition}
-A linear program $P = (A, b, c)$ over $F_\infty$ is said to be
-{\em valid} if it satisfies the following six conditions:
-\begin{itemize}
-\item $A$ does not have $\bot$ and $\top$ in the same row
-\item $A$ does not have $\bot$ and $\top$ in the same column
-\item $A$ does not have $\bot$ in any row where $b$ has $\bot$
-\item $A$ does not have $\top$ in any column where $c$ has $\bot$
-\item $A$ does not have $\top$ in any row where $b$ has $\top$
-\item $A$ does not have $\bot$ in any column where $c$ has $\top$
-\end{itemize}
-We say that the linear program $(-A^T, c, b)$ is the \emph{dual} of $P$.
-\end{definition}
-It is straightforward to check that the dual of a valid LP is valid.
-
-\begin{theorem}[\customlabel{ExtendedLP.strongDuality}{\texttt{ExtendedLP.strongDuality}}]
-Let $F$ be a linearly ordered field,
-$P$ be a valid linear program over $F_\infty$ and $D$ be its dual.
-Then $P^\star$ and $D^\star$ are defined.
-If at least one of them is feasible, i.e., $(P^\star, D^\star) \ne (\top, \top)$,
-then $P^\star = -D^\star$.
-\end{theorem}
-\begin{sloppypar} % ``unrecognized environment''
-Similar to~\ref{extendedFarkas}, all six conditions in the definition of a valid LP are necessary:
-omitting any one of them makes~\ref{ExtendedLP.strongDuality} false (see counterexamples in \sekt{sec:ExtendedLP.strongDuality:counterexamples}).
-\end{sloppypar}
-
-Note that the conclusion of the theorem ($P^\star = -D^\star$) can be reformulated,
-when we use highly informal notation, as in eq.~\eqref{eq:StandardLP.strongDuality:minmin}:
-$$ \min \,\{\, c \* x ~|~ x \ge 0 \aand A \* x \le b \,\}
-=- \min \,\{\, b \* y ~|~ y \ge 0 \aand (-A^T) \* y \le c \,\}
-$$
-If all entries of $(A,b,c)$ are finite then this equation can be stated in
-many equivalent ways, e.g.\ as in~\eqref{eq:StandardLP.strongDuality:maxmin}.
-This reformulation uses the facts
-that $ (-c) \* x = - (c \* x) $,
-and that $(-A^T) \* y \le c$ is equivalent to $A^T \* y \ge -c$.
-These facts are no longer true if infinities are allowed,
-so one must be careful when formulating the duality theorem for $F_\infty$.
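-For instance, already for $1 \times 1$ data $c = (\top)$ and $x = (0)$ we get
-$c \* x = 0 \cdot \top = 0$, hence $-(c \* x) = 0$, while $(-c) \* x = 0 \cdot \bot = \bot$.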
-We considered several ``$\max\,$/$\,\min$'' and ``$\max\,$/$\,\max$'' versions,
-but they all required stronger preconditions compared to the ``$\min\,$/$\,\min$'' version
-given in~\ref{ExtendedLP.strongDuality}.
-
-\subsection{Structure of this paper}
-
-In \sekt{preliminaries}, we explain all underlying definitions and
-comment on the formalization process;
-following the philosophy of \cite{Believe}
-we review almost all the declarations needed for the reader to believe
-our results, leaving out many declarations that were used in order
-to prove the results.
-In \sekt{statements}, we formally state theorems from \sekt{introduction}
-using definitions from \sekt{preliminaries}.
-In \sekt{bartl}, we prove \ref{fintypeFarkasBartl} (stated in \sekt{generalizations}),
-from which we obtain \ref{equalityFarkas} as a corollary.
-In \sekt{extended}, we prove \ref{extendedFarkas} (stated in \sekt{extensions}).
-In \sekt{dualityELP}, we prove \ref{ExtendedLP.strongDuality}
-(stated in \sekt{extensions}), from which we obtain
-\ref{StandardLP.strongDuality} as a corollary.
-In \sekt{counterexamples}, we show what happens when various preconditions
-are not satisfied.
-
-The repository \texttt{https://github.com/madvorak/duality}
-contains the full version of all definitions, statements,
-and proofs. They are written in a formal language called
-Lean 4, which provides a guarantee that every step of
-every proof follows from valid logical axioms.\footnote{
-The only axioms used in our proofs are
-\texttt{propext}, \texttt{Classical.choice}, and
-\texttt{Quot.sound}, which you can check by the
-\texttt{\#print axioms} command.}
-This paper attempts to be an accurate description of
-the \texttt{duality} project.
-However, in case of any discrepancy, the code shall prevail.
-
-\section{Preliminaries}
-\label{preliminaries}
-
-There are many layers of definitions that have to be built before we can say
-what a linearly ordered field $F$ is (used e.g.~in \ref{equalityFarkas}),
-what a linearly ordered division ring $R$ is (used e.g.~in \ref{scalarFarkas}), and
-what a linearly ordered abelian group $V$ is (used in \ref{fintypeFarkasBartl}).
-%We need these three algebraic structures for stating our main results.
-This section mainly documents existing Mathlib definitions.
-We also highlight the custom definitions that were added specifically for our project.
-
-\subsection{Review of algebraic typeclasses that our project depends on}
-
-Additive semigroup is a structure on any type with addition where the addition is associative:
-\begin{lstlisting}
-class AddSemigroup (G : Type u) extends Add G where
- add_assoc : ∀ a b c : G, (a + b) + c = a + (b + c)
-\end{lstlisting}
-Similarly, semigroup is a structure on any type with multiplication where the multiplication is associative:
-\begin{lstlisting}
-class Semigroup (G : Type u) extends Mul G where
- mul_assoc : ∀ a b c : G, (a * b) * c = a * (b * c)
-\end{lstlisting}
-Additive monoid is an additive semigroup with the ``zero'' element that is neutral with respect to addition
-from both left and right, thanks to which we can define a scalar multiplication by the natural numbers:
-\begin{lstlisting}
-class AddZeroClass (M : Type u) extends Zero M, Add M where
- zero_add : ∀ a : M, 0 + a = a
- add_zero : ∀ a : M, a + 0 = a
-class AddMonoid (M : Type u) extends AddSemigroup M, AddZeroClass M where
- nsmul : ℕ → M → M
- nsmul_zero : ∀ x : M, nsmul 0 x = 0
- nsmul_succ : ∀ (n : ℕ) (x : M), nsmul (n + 1) x = nsmul n x + x
-\end{lstlisting}
-Similarly, monoid is a semigroup with the ``one'' element that is neutral with respect to multiplication
-from both left and right, thanks to which we can define a power to the natural numbers:
-\begin{lstlisting}
-class MulOneClass (M : Type u) extends One M, Mul M where
- one_mul : ∀ a : M, 1 * a = a
- mul_one : ∀ a : M, a * 1 = a
-class Monoid (M : Type u) extends Semigroup M, MulOneClass M where
- npow : ℕ → M → M
- npow_zero : ∀ x : M, npow 0 x = 1
- npow_succ : ∀ (n : ℕ) (x : M), npow (n + 1) x = npow n x * x
-\end{lstlisting}
-Subtractive monoid is an additive monoid that adds two more operations (unary and binary minus)
-that satisfy some basic properties (please note that ``adding the negation of an element to itself gives zero''
-is not required yet; that will be required e.g.~in an additive group):
-\begin{lstlisting}
-class SubNegMonoid (G : Type u) extends AddMonoid G, Neg G, Sub G where
- sub := SubNegMonoid.sub'
- sub_eq_add_neg : ∀ a b : G, a - b = a + -b
- zsmul : ℤ → G → G
- zsmul_zero' : ∀ a : G, zsmul 0 a = 0
- zsmul_succ' (n : ℕ) (a : G) : zsmul (Int.ofNat n.succ) a = zsmul (Int.ofNat n) a + a
- zsmul_neg' (n : ℕ) (a : G) : zsmul (Int.negSucc n) a = -(zsmul n.succ a)
-\end{lstlisting}
-Similarly, division monoid is a monoid that adds two more operations (inverse and divide)
-that satisfy some basic properties (please note that ``multiplication by an inverse gives one''
-is not required yet; that will be required e.g.~in a group):
-\begin{lstlisting}
-class DivInvMonoid (G : Type u) extends Monoid G, Inv G, Div G where
- div := DivInvMonoid.div'
- div_eq_mul_inv : ∀ a b : G, a / b = a * b⁻¹
- zpow : ℤ → G → G
- zpow_zero' : ∀ a : G, zpow 0 a = 1
- zpow_succ' (n : ℕ) (a : G) : zpow (Int.ofNat n.succ) a = zpow (Int.ofNat n) a * a
- zpow_neg' (n : ℕ) (a : G) : zpow (Int.negSucc n) a = (zpow n.succ a)⁻¹
-\end{lstlisting}
-Additive group is a subtractive monoid in which the unary minus acts as a left inverse with respect to addition:
-\begin{lstlisting}
-class AddGroup (A : Type u) extends SubNegMonoid A where
- add_left_neg : ∀ a : A, -a + a = 0
-\end{lstlisting}
-%Similarly, a group is a division monoid in which the inverse operation acts as a left inverse with respect to multiplication:
-%\begin{lstlisting}
-%class Group (G : Type u) extends DivInvMonoid G where
-% inv_mul_cancel : ∀ a : G, a⁻¹ * a = 1
-%\end{lstlisting}
-Abelian magma is a structure on any type that has commutative addition:
-\begin{lstlisting}
-class AddCommMagma (G : Type u) extends Add G where
- add_comm : ∀ a b : G, a + b = b + a
-\end{lstlisting}
-Similarly, commutative magma is a structure on any type that has commutative multiplication:
-\begin{lstlisting}
-class CommMagma (G : Type u) extends Mul G where
- mul_comm : ∀ a b : G, a * b = b * a
-\end{lstlisting}
-Abelian semigroup is an abelian magma and an additive semigroup at the same time:
-\begin{lstlisting}
-class AddCommSemigroup (G : Type u) extends AddSemigroup G, AddCommMagma G
-\end{lstlisting}
-Similarly, commutative semigroup is a commutative magma and a semigroup at the same time:
-\begin{lstlisting}
-class CommSemigroup (G : Type u) extends Semigroup G, CommMagma G
-\end{lstlisting}
-Abelian monoid is an additive monoid and an abelian semigroup at the same time:
-\begin{lstlisting}
-class AddCommMonoid (M : Type u) extends AddMonoid M, AddCommSemigroup M
-\end{lstlisting}
-Similarly, commutative monoid is a monoid and a commutative semigroup at the same time:
-\begin{lstlisting}
-class CommMonoid (M : Type u) extends Monoid M, CommSemigroup M
-\end{lstlisting}
-Abelian group is an additive group and an abelian monoid at the same time:
-\begin{lstlisting}
-class AddCommGroup (G : Type u) extends AddGroup G, AddCommMonoid G
-\end{lstlisting}
-Distrib is a structure on any type with addition and multiplication where
-both left distributivity and right distributivity hold:
-\begin{lstlisting}
-class Distrib (R : Type*) extends Mul R, Add R where
- left_distrib : ∀ a b c : R, a * (b + c) = a * b + a * c
- right_distrib : ∀ a b c : R, (a + b) * c = a * c + b * c
-\end{lstlisting}
-Nonunital-nonassociative-semiring is an abelian monoid with distributive multiplication and well-behaved zero:
-\begin{lstlisting}
-class MulZeroClass (M₀ : Type u) extends Mul M₀, Zero M₀ where
- zero_mul : ∀ a : M₀, 0 * a = 0
- mul_zero : ∀ a : M₀, a * 0 = 0
-class NonUnitalNonAssocSemiring (α : Type u) extends AddCommMonoid α, Distrib α, MulZeroClass α
-\end{lstlisting}
-Nonunital-semiring is a nonunital-nonassociative-semiring that forms a semigroup with zero (i.e., the
-multiplication is now required to be associative):
-\begin{lstlisting}
-class SemigroupWithZero (S₀ : Type u) extends Semigroup S₀, MulZeroClass S₀
-class NonUnitalSemiring (α : Type u) extends NonUnitalNonAssocSemiring α, SemigroupWithZero α
-\end{lstlisting}
-Additive monoid with one and abelian monoid with one are defined from additive monoid
-and abelian monoid, equipped with the symbol ``one'' and embedding of natural numbers
-(please ignore the difference between \texttt{Type u} and \texttt{Type*} as it is the same thing
-for all our purposes):
-\begin{lstlisting}
-class AddMonoidWithOne (R : Type*) extends NatCast R, AddMonoid R, One R where
- natCast := Nat.unaryCast
- natCast_zero : natCast 0 = 0
- natCast_succ : ∀ n : ℕ, natCast (n + 1) = (natCast n) + 1
-class AddCommMonoidWithOne (R : Type*) extends AddMonoidWithOne R, AddCommMonoid R
-\end{lstlisting}
-Additive group with one is an additive monoid with one and additive group, and embeds all integers:
-\begin{lstlisting}
-class AddGroupWithOne (R : Type u) extends IntCast R, AddMonoidWithOne R, AddGroup R where
- intCast := Int.castDef
- intCast_ofNat : ∀ n : ℕ, intCast (n : ℕ) = Nat.cast n
- intCast_negSucc : ∀ n : ℕ, intCast (Int.negSucc n) = - Nat.cast (n + 1)
-\end{lstlisting}
-Nonassociative-semiring is a nonunital-nonassociative-semiring that has a well-behaved multiplication
-by both zero and one and forms an abelian monoid with one (i.e., the unit is finally defined):
-\begin{lstlisting}
-class MulZeroOneClass (M₀ : Type u) extends MulOneClass M₀, MulZeroClass M₀
-class NonAssocSemiring (α : Type u) extends NonUnitalNonAssocSemiring α, MulZeroOneClass α,
- AddCommMonoidWithOne α
-\end{lstlisting}
-Semiring is the combination of nonunital-semiring and nonassociative-semiring that forms a monoid with zero:
-\begin{lstlisting}
-class MonoidWithZero (M₀ : Type u) extends Monoid M₀, MulZeroOneClass M₀, SemigroupWithZero M₀
-class Semiring (α : Type u) extends NonUnitalSemiring α, NonAssocSemiring α, MonoidWithZero α
-\end{lstlisting}
-Ring is a semiring and an abelian group at the same time, with a ``one'' that behaves well:
-\begin{lstlisting}
-class Ring (R : Type u) extends Semiring R, AddCommGroup R, AddGroupWithOne R
-\end{lstlisting}
-Commutative ring is a ring (guarantees commutative addition) and a commutative monoid (guarantees commutative multiplication)
-at the same time.
-\begin{lstlisting}
-class CommRing (α : Type u) extends Ring α, CommMonoid α
-\end{lstlisting}
-Division ring is a nontrivial ring whose multiplication forms a division monoid, whose nonzero elements have
-multiplicative inverses, whose zero is its own inverse
-(if you find the equality $0^{-1}\!=\!0$ disturbing, read \cite{DivBy0} that explains it),
-and embeds rational numbers:
-\begin{lstlisting}
-class Nontrivial (α : Type*) : Prop where
- exists_pair_ne : ∃ x y : α, x ≠ y
-class DivisionRing (α : Type*) extends Ring α, DivInvMonoid α, Nontrivial α, NNRatCast α, RatCast α where
- mul_inv_cancel : ∀ (a : α), a ≠ 0 → a * a⁻¹ = 1
- inv_zero : (0 : α)⁻¹ = 0
- nnratCast := NNRat.castRec
-\end{lstlisting}
-Field is a commutative ring and a division ring at the same time:
-\begin{lstlisting}
-class Field (K : Type u) extends CommRing K, DivisionRing K
-\end{lstlisting}
-Preorder is a reflexive \& transitive relation $\le$ on any structure with binary relational symbols $\le$ and $<$,
-where the strict comparison $a < b$ is defined to be equivalent to $a \le b \aand \neg (b \le a)$;
-note that $\le$ is not required to be symmetric or antisymmetric:
-\begin{lstlisting}
-class Preorder (α : Type u) extends LE α, LT α where
- le_refl : ∀ a : α, a ≤ a
- le_trans : ∀ a b c : α, a ≤ b → b ≤ c → a ≤ c
- lt_iff_le_not_le : ∀ a b : α, a < b ↔ a ≤ b ∧ ¬b ≤ a
-\end{lstlisting}
-Partial order is a reflexive \& antisymmetric \& transitive relation:
-\begin{lstlisting}
-class PartialOrder (α : Type u) extends Preorder α where
- le_antisymm : ∀ a b : α, a ≤ b → b ≤ a → a = b
-\end{lstlisting}
-Ordered abelian group is an abelian group with partial order that respects addition:
-\begin{lstlisting}
-class OrderedAddCommGroup (α : Type u) extends AddCommGroup α, PartialOrder α where
- add_le_add_left : ∀ a b : α, a ≤ b → ∀ c : α, c + a ≤ c + b
-\end{lstlisting}
-Strictly ordered ring is a nontrivial ring whose addition behaves as an ordered abelian group
-where zero is less than or equal to one and the product of two strictly positive elements is strictly positive:
-\begin{lstlisting}
-class StrictOrderedRing (α : Type u) extends Ring α, OrderedAddCommGroup α, Nontrivial α where
- zero_le_one : 0 ≤ (1 : α)
- mul_pos : ∀ a b : α, 0 < a → 0 < b → 0 < a * b
-\end{lstlisting}
-Linear order (sometimes called total order) is a partial order where every two elements are comparable;
-technical details are omitted:
-\begin{lstlisting}
-class LinearOrder (α : Type u) extends PartialOrder α, (...)
- le_total (a b : α) : a ≤ b ∨ b ≤ a
- (...)
-\end{lstlisting}
-Linearly ordered abelian group is an ordered abelian group whose order is linear:
-\begin{lstlisting}
-class LinearOrderedAddCommGroup (α : Type u) extends OrderedAddCommGroup α, LinearOrder α
-\end{lstlisting}
-Linearly ordered ring is a strictly ordered ring where every two elements are comparable:
-\begin{lstlisting}
-class LinearOrderedRing (α : Type u) extends StrictOrderedRing α, LinearOrder α
-\end{lstlisting}
-Linearly ordered commutative ring is a linearly ordered ring and commutative monoid at the same time:
-\begin{lstlisting}
-class LinearOrderedCommRing (α : Type u) extends LinearOrderedRing α, CommMonoid α
-\end{lstlisting}
-In our project, we define a linearly ordered division ring as a division ring that is a linearly ordered ring at the same time:
-\begin{lstlisting}
-class LinearOrderedDivisionRing (R : Type*) extends LinearOrderedRing R, DivisionRing R
-\end{lstlisting}
-Linearly ordered field is defined in Mathlib as a linearly ordered commutative ring that is a field at the same time:
-\begin{lstlisting}
-class LinearOrderedField (α : Type*) extends LinearOrderedCommRing α, Field α
-\end{lstlisting}
-Note that \texttt{LinearOrderedDivisionRing} is not a part of the algebraic hierarchy provided by Mathlib,
-hence \texttt{LinearOrderedField} does~not~inherit \texttt{LinearOrderedDivisionRing}.
-Because of that, we provide a custom instance that converts
-\texttt{LinearOrderedField} to \texttt{LinearOrderedDivisionRing}~as~follows:
-\begin{lstlisting}
-instance LinearOrderedField.toLinearOrderedDivisionRing {F : Type*} [instF : LinearOrderedField F] :
- LinearOrderedDivisionRing F := { instF with }
-\end{lstlisting}
-This instance is needed for the step from \ref{coordinateFarkas} to \ref{equalityFarkas}.
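-
-With this instance in scope, typeclass search derives the weaker structure automatically.
-The following sanity check is not part of the project and is shown here only for illustration:
-\begin{lstlisting}
-example {F : Type*} [LinearOrderedField F] : LinearOrderedDivisionRing F :=
-  inferInstance
-\end{lstlisting}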
-
-\subsection{Extended linearly ordered fields}
-
-Given any type $F$, we construct $F \cup \{ \bot, \top \}$ as follows:
-\begin{lstlisting}
-def Extend (F : Type*) := WithBot (WithTop F)
-\end{lstlisting}
-From now on we assume that $F$ is a linearly ordered field:
-\begin{lstlisting}
-variable {F : Type*} [LinearOrderedField F]
-\end{lstlisting}
-The following instance defines how addition and comparison
-behave on $F_\infty$ and automatically generates a proof
-that $F_\infty$ forms a linearly ordered abelian monoid:
-\begin{lstlisting}
-instance : LinearOrderedAddCommMonoid (Extend F) :=
- inferInstanceAs (LinearOrderedAddCommMonoid (WithBot (WithTop F)))
-\end{lstlisting}
-The following definition embeds $F$ in $F_\infty$ and registers
-this canonical embedding as a type coercion:
-\begin{lstlisting}
-@[coe] def toE : F → (Extend F) := some ∘ some
-instance : Coe F (Extend F) := ⟨toE⟩
-\end{lstlisting}
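-For illustration (a sketch, not taken from the project), a finite value of $F$ can be sent to $F_\infty$ explicitly:
-\begin{lstlisting}
-example (x : F) : Extend F := toE x  -- the Coe instance should also allow writing (x : Extend F)
-\end{lstlisting}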
-Unary minus on $F_\infty$ is defined as follows:
-\begin{lstlisting}
-def neg : Extend F → Extend F
-| ⊥ => ⊤
-| ⊤ => ⊥
-| (x : F) => toE (-x)
-instance : Neg (Extend F) := ⟨EF.neg⟩
-\end{lstlisting}
-Line-by-line, we see that:
-\begin{itemize}
-\item negating $\bot$ gives $\top$
-\item negating $\top$ gives $\bot$
-\item negating any finite value $x$ gives $-x$ converted to the type $F_\infty$
-\end{itemize}
-In the file \texttt{FarkasSpecial.lean} and everything downstream,
-we have the notation
-\begin{lstlisting}
-F∞
-\end{lstlisting}
-for $F_\infty$ and also the notation
-\begin{lstlisting}
-F≥0
-\end{lstlisting}
-for the type of nonnegative elements of $F$.
-We define a scalar action of the nonnegative elements
-on $F_\infty$ as follows:
-\begin{lstlisting}
-def EF.smulNN (c : F≥0) : F∞ → F∞
-| ⊥ => ⊥
-| ⊤ => if c = 0 then 0 else ⊤
-| (f : F) => toE (c.val * f)
-\end{lstlisting}
-Line-by-line, we see that:
-\begin{itemize}
-\item multiplying $\bot$ by anything from the left gives $\bot$
-\item multiplying $\top$ by $0$ from the left gives $0$;
-multiplying $\top$ by a strictly positive element from the left gives $\top$
-\item multiplying any finite value $f$ by a coefficient $c$ from the left
-gives $c \* f$ converted to the type $F_\infty$
-\end{itemize}
-Scalar action on $F_\infty$ by negative elements from the left is undefined.
-Multiplication between two elements of $F_\infty$ is also undefined.
-
-\subsection{Vectors and matrices}
-
-We distinguish two types of vectors: implicit vectors and explicit vectors.
-Implicit vectors (called just ``vectors'') are members of a vector space;
-they do not have any internal structure
-(in the informal text, we use the word ``vectors'' somewhat loosely;
-it can refer to members of any module).
-Explicit vectors are functions from coordinates to values (again, we use the term
-``explicit vector'' (or just ``vector'' when it is clear from context that our vector
-is a map) not only when they form a vector space).
-The type of coordinates does not have to be ordered.
-Matrices live next to explicit vectors. They are also functions; they take a row index
-and a column index and they output a value at the given spot.
-Neither the row indices nor the column indices are required to form an ordered type.
-This is why multiplication between matrices and vectors is defined only in structures
-where addition forms a commutative semigroup. Consider the following example:
-$$
-\begin{pmatrix}
- 1 & 2 & 3 \\
- 4 & 5 & 6 \\
-\end{pmatrix}
-\*
-\begin{pmatrix}
- 7 \\ 8 \\ 9
-\end{pmatrix}
-=
-\begin{pmatrix}
- ? \\ \_
-\end{pmatrix}
-$$
-We do not know whether the value at the question mark is equal to
-$ 1 \* 7 + 2 \* 8 + 3 \* 9 $ or to
-$ 2 \* 8 + 1 \* 7 + 3 \* 9 $ or to
-any other ordering of summands.
-This is why commutativity of addition is necessary for the definition to be valid.
-On the other hand, we do not assume any property of multiplication in the
-definition of multiplication between matrices and vectors; the matrix entries and the vector entries
-do not even have to be of the same type; we only require the elements of the vector
-to have a scalar action on the elements of the matrix (this is not a typo\:---\:normally,
-one would expect the matrix entries to act on the vector entries, but in our work it is the other way around).
-
-\subsubsection{Mathlib definitions versus our definitions}
-
-Mathlib defines dot product (i.e., a product of an explicit vector with an explicit vector) as follows:
-\begin{lstlisting}
-def Matrix.dotProduct [Fintype m] [Mul α] [AddCommMonoid α] (v w : m → α) : α :=
- ∑ i, v i * w i
-\end{lstlisting}
-Mathlib defines a product of a matrix with an explicit vector as follows:
-\begin{lstlisting}
-def Matrix.mulVec [Fintype n] [NonUnitalNonAssocSemiring α] (M : Matrix m n α) (v : n → α) : m → α
- | i => (fun j => M i j) ⬝ᵥ v
-\end{lstlisting}
-Mildly confusingly, \texttt{m} and \texttt{n} are type variables here, not natural numbers.
-Note that, when multiplying two vectors,
-the left vector is not transposed\:---\:a vector is not
-defined as a special case of matrix, and transposition is not
-defined as an operation on explicit vectors, only on matrices.
-
-Infix notation for these two operations is defined as follows
-(the keyword \texttt{infixl} when defining an operator $\circ$ means that
-\hbox{\texttt{x $\circ$ y $\circ$ z}} gets parsed as \texttt{(x $\circ$ y) $\circ$ z}
-whether or not it makes sense, whereas the keyword \texttt{infixr} when defining an operator $\circ$ means that %lb
-\hbox{\texttt{A $\circ$ B $\circ$ v}} gets parsed as \texttt{A $\circ$ (B $\circ$ v)}
-whether or not it makes sense; the numbers 72 and 73 determine precedence):
-\begin{lstlisting}
-scoped infixl:72 " ⬝ᵥ " => Matrix.dotProduct
-scoped infixr:73 " *ᵥ " => Matrix.mulVec
-\end{lstlisting}
-These definitions are sufficient for stating results based on linearly ordered fields.
-However, our results from \sekt{extensions} require a more general notion of
-multiplication between matrices and vectors.
-They are defined in the section \texttt{hetero\_matrix\_products\_defs} as follows:
-\begin{lstlisting}
-variable {α γ : Type*} [AddCommMonoid α] [SMul γ α]
-
-def Matrix.dotProd (v : I → α) (w : I → γ) : α :=
- ∑ i : I, w i • v i
-infixl:72 " ᵥ⬝ " => Matrix.dotProd
-
-def Matrix.mulWeig (M : Matrix I J α) (w : J → γ) (i : I) : α :=
- M i ᵥ⬝ w
-infixr:73 " ₘ* " => Matrix.mulWeig
-\end{lstlisting}
-We start by declaring that $\alpha$ and $\gamma$ are types from
-any universe (not necessarily both from the same universe).
-We require that $\alpha$ forms an abelian monoid and that
-$\gamma$ has a scalar action on $\alpha$. In this setting,
-we can instantiate $\alpha$ with $F_\infty$ and $\gamma$
-with $F$ for any linearly ordered field $F$.
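-For example, taking $\alpha := F_\infty$ and $\gamma$ to be the nonnegative elements of $F$
-(exactly as in the statement of \ref{extendedFarkas} in \sekt{statements}),
-the following sketch typechecks, assuming the section variables, \texttt{Fintype} instances,
-and the scalar-action instance derived from \texttt{EF.smulNN} are in scope:
-\begin{lstlisting}
-example (M : Matrix I J F∞) (x : J → F≥0) (b : I → F∞) : Prop :=
-  M ₘ* x ≤ b
-\end{lstlisting}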
-
-For explicit vectors
-$v : I \r \alpha$ and $w : I \r \gamma$, we define
-their product of type $\alpha$ as follows.
-Every element of $v$ gets multiplied from the left by
-the element of $w$ at the same index.
-Then we sum them all together (in unspecified order).
-For a matrix $M : (I \times J) \r \alpha$ and
-a vector $w : J \r \gamma$, we define
-their product of type $I \r \alpha$ as a function
-that takes an index $i$ and outputs the dot product
-between the $i$-th row of $M$ and the vector $w$.
-
-Beware that the arguments (both in the function definition and
-in the infix notation) come in the opposite order from how
-scalar action is written. We recommend a mnemonic
-``vector times weights'' for $v \* w$ and
-``matrix times weights'' for $M \* w$ where
-arguments come in alphabetical order.
-
-In the infix notation, you can distinguish between the standard
-Mathlib definitions and our definitions by observing that Mathlib
-operators put the subscript letter to the right of the symbol, whereas
-our operators put a letter to the left of the symbol.
-
-We find it unfortunate that the Mathlib name of \texttt{dotProduct}
-is prefixed by the \texttt{Matrix} namespace.
-While \texttt{Matrix.mulVec} allows for the use of dot notation
-in places where infix notation is not used, the full name
-\texttt{Matrix.dotProduct} only clutters the code.
-Even though we do not like it, we decided to namespace our definitions
-in the same way for consistency.
-
-Since we have new definitions, we have to rebuild all API (a lot of lemmas)
-for \texttt{Matrix.dotProd} and \texttt{Matrix.mulWeig} from scratch.
-This process was very tiresome. We decided not to develop a full reusable
-library, but prove only those lemmas we wanted to use in our project.
-For similar reasons, we did not generalize the Mathlib definition of
-``vector times matrix'', as ``matrix times vector'' was all we needed.
-It was still a lot of lemmas.
-
-\subsection{Modules and how to order them}
-\label{modules}
-
-Given types $\alpha$ and $\beta$ such that $\alpha$ has a scalar action on $\beta$
-and $\alpha$ forms a monoid, Mathlib defines multiplicative action where
-$1$ of type $\alpha$ gives the identity action and multiplication in the monoid
-associates with the scalar action:
-\begin{lstlisting}
-class MulAction (α : Type*) (β : Type*) [Monoid α] extends SMul α β where
- one_smul : ∀ b : β, (1 : α) • b = b
- mul_smul : ∀ (x y : α) (b : β), (x * y) • b = x • y • b
-\end{lstlisting}
-For a distributive multiplicative action, we furthermore require the latter type
-to form an additive monoid, and two more properties are required: multiplying the zero
-gives the zero, and the multiplicative action is distributive with respect to the addition:
-\begin{lstlisting}
-class DistribMulAction (M A : Type*) [Monoid M] [AddMonoid A] extends MulAction M A where
- smul_zero : ∀ a : M, a • (0 : A) = 0
- smul_add : ∀ (a : M) (x y : A), a • (x + y) = a • x + a • y
-\end{lstlisting}
-We can finally review the definition of a module. Here the former type must form a semiring
-and the latter type an abelian monoid. A module requires a distributive multiplicative action and
-two additional properties: addition in the semiring distributes with the multiplicative action,
-and multiplying by the zero of the semiring gives the zero of the abelian monoid:
-\begin{lstlisting}
-class Module (R : Type u) (M : Type v) [Semiring R] [AddCommMonoid M] extends DistribMulAction R M where
- add_smul : ∀ (r s : R) (x : M), (r + s) • x = r • x + s • x
- zero_smul : ∀ x : M, (0 : R) • x = 0
-\end{lstlisting}
-Note that the class \texttt{Module} does not extend the class \texttt{Semiring}; instead,
-it requires \texttt{Semiring} as an argument.
-The abelian monoid is also required as an argument in the definition. We call such a class a ``mixin''.
-Thanks to this design, we do not need to define superclasses of \texttt{Module} in order to require
-``more than a module''. Instead, we use superclasses in the respective arguments, i.e.,
-``more than a semiring'' and/or ``more than an abelian monoid''. For example, if we replace
-\begin{lstlisting}
-[Semiring R] [AddCommMonoid M] [Module R M]
-\end{lstlisting}
-in a theorem statement by
-\begin{lstlisting}
-[Field R] [AddCommGroup M] [Module R M]
-\end{lstlisting}
-we require \texttt{M} to be a vector space over \texttt{R}. We do not need to extend
-\texttt{Module} in order to define what a vector space is.
-
-In our case, to state the theorem \ref{fintypeFarkasBartl},
-we need \texttt{R} to be a linearly ordered division ring,
-we need \texttt{W} to be an \texttt{R}-module, and
-we need \texttt{V} to be a linearly ordered \texttt{R}-module.
-The following list of requirements is almost correct:
-\begin{lstlisting}
-[LinearOrderedDivisionRing R] [AddCommGroup W] [Module R W] [LinearOrderedAddCommGroup V] [Module R V]
-\end{lstlisting}
-The only missing assumption is the relationship between
-how \texttt{R} is ordered and how \texttt{V} is ordered.
-For that, we use another mixin, defined in Mathlib as follows:
-\begin{lstlisting}
-class PosSMulMono (α β : Type*) [SMul α β] [Preorder α] [Preorder β] : Prop where
- elim {a : α} (ha : 0 ≤ a) {b₁ b₂ : β} (hb : b₁ ≤ b₂) : a • b₁ ≤ a • b₂
-\end{lstlisting}
-Adding \texttt{[PosSMulMono R V]} to the list of requirements solves the issue.
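-Putting it all together, the assumptions under which \ref{fintypeFarkasBartl} is stated
-can be collected in a variable block along the following lines (a sketch; the code may order them differently):
-\begin{lstlisting}
-variable {R W V : Type*} [LinearOrderedDivisionRing R]
-  [AddCommGroup W] [Module R W]
-  [LinearOrderedAddCommGroup V] [Module R V] [PosSMulMono R V]
-\end{lstlisting}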
-
-We encourage the reader to try and delete various assumptions and see which parts of the proofs
-get underlined in red.
-
-{\color{blue}
-TODO explain ordering (specifically, nonnegativity) on explicit vectors.
-}
-
-Throughout the paper, we do not distinguish between
-nonnegative explicit vectors and explicit vectors of nonnegative elements.
-The code, however, does distinguish between them.
-
-
-\subsection{Linear programming}
-
-Extended linear programs are defined as follows: {\color{blue} TODO Matrix was not defined!}
-\begin{lstlisting}
-structure ExtendedLP (I J F : Type*) [LinearOrderedField F] where
- A : Matrix I J F∞
- b : I → F∞
- c : J → F∞
-\end{lstlisting}
-Vector $x$ made of finite nonnegative values is a solution if and only if $A \* x \le b$
-holds:
-\begin{lstlisting}
-def ExtendedLP.IsSolution (P : ExtendedLP I J F) (x : J → F≥0) : Prop :=
- P.A ₘ* x ≤ P.b
-\end{lstlisting}
-$P$ reaches a value $r$ if and only if $P$ has solution $x$ such that $c \* x = r$ holds:
-\begin{lstlisting}
-def ExtendedLP.Reaches (P : ExtendedLP I J F) (r : F∞) : Prop :=
- ∃ x : J → F≥0, P.IsSolution x ∧ P.c ᵥ⬝ x = r
-\end{lstlisting}
-$P$ is feasible if and only if $P$ reaches a value different from $\top$:
-\begin{lstlisting}
-def ExtendedLP.IsFeasible [Fintype J] (P : ExtendedLP I J F) : Prop :=
- ∃ p : F∞, P.Reaches p ∧ p ≠ ⊤
-\end{lstlisting}
-$P$ is bounded by a value $r$ (from below -- we always minimize) if and only if
-$P$ reaches only values greater or equal to $r$:
-\begin{lstlisting}
-def ExtendedLP.IsBoundedBy (P : ExtendedLP I J F) (r : F) : Prop :=
- ∀ p : F∞, P.Reaches p → r ≤ p
-\end{lstlisting}
-$P$ is unbounded if and only if $P$ has no finite lower bound:
-\begin{lstlisting}
-def ExtendedLP.IsUnbounded (P : ExtendedLP I J F) : Prop :=
- ¬∃ r : F, P.IsBoundedBy r
-\end{lstlisting}
-Valid extended linear programs are defined as follows (the six conditions were
-introduced in \sekt{extensions}):
-\begin{lstlisting}
-structure ValidELP (I J F : Type*) [LinearOrderedField F] extends ExtendedLP I J F where
- hAi : ¬∃ i : I, (∃ j : J, A i j = ⊥) ∧ (∃ j : J, A i j = ⊤)
- hAj : ¬∃ j : J, (∃ i : I, A i j = ⊥) ∧ (∃ i : I, A i j = ⊤)
- hbA : ¬∃ i : I, (∃ j : J, A i j = ⊥) ∧ b i = ⊥
- hcA : ¬∃ j : J, (∃ i : I, A i j = ⊤) ∧ c j = ⊥
- hAb : ¬∃ i : I, (∃ j : J, A i j = ⊤) ∧ b i = ⊤
- hAc : ¬∃ j : J, (∃ i : I, A i j = ⊥) ∧ c j = ⊤
-\end{lstlisting}
-The following definition says how linear programs are dualized (first, the abbreviation
-\texttt{ExtendedLP.dualize} says that $(A, b, c)$ is mapped to $(-A^T, c, b)$;
-then the definition \texttt{ValidELP.dualize} says that the dualization of \texttt{ValidELP}
-is inherited from \texttt{ExtendedLP.dualize}; the remaining six lines generate
-proofs that our six conditions stay satisfied after dualization,
-where \texttt{aeply} is a custom tactic based on \texttt{aesop} \cite{Aesop}
-and \texttt{apply}):
-\begin{lstlisting}
-abbrev ExtendedLP.dualize (P : ExtendedLP I J F) : ExtendedLP J I F :=
- ⟨-P.Aᵀ, P.c, P.b⟩
-
-def ValidELP.dualize (P : ValidELP I J F) : ValidELP J I F where
- toExtendedLP := P.toExtendedLP.dualize
- hAi := by aeply P.hAj
- hAj := by aeply P.hAi
- hbA := by aeply P.hcA
- hcA := by aeply P.hbA
- hAb := by aeply P.hAc
- hAc := by aeply P.hAb
-\end{lstlisting}
-The definition of optimum is, sadly, very complicated (it will be explained below):
-\begin{lstlisting}
-noncomputable def ExtendedLP.optimum [Fintype J] (P : ExtendedLP I J F) : Option F∞ :=
- if ¬P.IsFeasible then
- some ⊤
- else
- if P.IsUnbounded then
- some ⊥
- else
- if hr : ∃ r : F, P.Reaches (toE r) ∧ P.IsBoundedBy r then
- some (toE hr.choose)
- else
- none
-\end{lstlisting}
-The type \texttt{Option F$\infty$}, which is implemented as
-\texttt{Option (Option (Option F))} after unfolding definitions,
-allows the following values:
-\begin{itemize}
-\item \texttt{none}
-\item \texttt{some $\bot$} implemented as \texttt{some none}
-\item \texttt{some $\top$} implemented as \texttt{some (some none)}
-\item \texttt{some (toE r)} implemented as \texttt{some (some (some r))} for any \texttt{r :~F}
-\end{itemize}
-We assign the following semantics to \texttt{Option F$\infty$} values:
-\begin{itemize}
-\item \texttt{none} $\ \dots$ invalid finite value (infimum is not attained)
-\item \texttt{some $\bot$} $\ \dots$ feasible unbounded
-\item \texttt{some $\top$} $\ \dots$ infeasible
-\item \texttt{some (toE r)} $\ \dots$ the minimum is a finite value $r$
-\end{itemize}
-The definition \texttt{ExtendedLP.optimum} first asks if $P$ is feasible;
-if not, it returns \texttt{some $\top$} (i.e., the worst value).
-When $P$ is feasible, it asks whether $P$ is unbounded; if yes, it returns \texttt{some $\bot$}
-(i.e., the best value).
-When $P$ is feasible and bounded,
-it asks if there is a finite value $r$ such that $P$ reaches $r$ and, at the same time,
-$P$ is bounded by $r$; if so, it returns \texttt{some (toE r)}.
-Otherwise, it returns \texttt{none}.
-Note that we use the verbs ``ask'' and ``return'' metaphorically;
-\texttt{ExtendedLP.optimum} is not a computable function; it is just a mathematical
-definition (see the keyword \texttt{noncomputable def} above) you can prove things about.
-
-Finally, we define what opposite values of the type \texttt{Option F$\infty$} are
-(note that for any values except \texttt{none} we directly follow the definition
-of negation a.k.a.\ unary minus on the extended linearly ordered fields):
-\begin{lstlisting}
-def OppositesOpt : Option F∞ → Option F∞ → Prop
-| (p : F∞), (q : F∞) => p = -q
-| _ , _ => False
-\end{lstlisting}
-For example, \texttt{OppositesOpt (-3) 3} and \texttt{OppositesOpt 5 (-5)} hold.
-The last line says that \texttt{OppositesOpt none none} is false.
-We~later~show that \texttt{none} is never the optimum of a valid\footnote{
-In principle, we could say that \texttt{none} is never the optimum of any
-linear program, i.e., $P^\star$ is defined for every (extended) linear program $P$,
-but we did not bother proving it.} linear program, but we do not know it yet.
-
-
-\section{Formal statements of selected results}
-\label{statements}
-
-\subsection{Main corollaries}
-
-\begin{lstlisting}
-theorem equalityFarkas (A : Matrix I J F) (b : I → F) :
- (∃ x : J → F, 0 ≤ x ∧ A *ᵥ x = b) ≠ (∃ y : I → F, 0 ≤ Aᵀ *ᵥ y ∧ b ⬝ᵥ y < 0)
-\end{lstlisting}
-\begin{lstlisting}
-theorem inequalityFarkas [DecidableEq I] (A : Matrix I J F) (b : I → F) :
- (∃ x : J → F, 0 ≤ x ∧ A *ᵥ x ≤ b) ≠ (∃ y : I → F, 0 ≤ y ∧ 0 ≤ Aᵀ *ᵥ y ∧ b ⬝ᵥ y < 0)
-\end{lstlisting}
-\begin{lstlisting}
-theorem StandardLP.strongDuality {P : StandardLP I J R} (hP : P.IsFeasible ∨ P.dualize.IsFeasible) :
- OppositesOpt P.optimum P.dualize.optimum
-\end{lstlisting}
-
-\subsection{Main results}
-
-\begin{lstlisting}
-theorem fintypeFarkasBartl {J : Type*} [Fintype J] [LinearOrderedDivisionRing R]
- [LinearOrderedAddCommGroup V] [Module R V] [PosSMulMono R V] [AddCommGroup W] [Module R W]
- (A : W →ₗ[R] J → R) (b : W →ₗ[R] V) :
- (∃ x : J → V, 0 ≤ x ∧ ∀ w : W, ∑ j : J, A w j • x j = b w) ≠ (∃ y : W, 0 ≤ A y ∧ b y < 0)
-\end{lstlisting}
-\begin{lstlisting}
-theorem extendedFarkas (A : Matrix I J F∞) (b : I → F∞)
- (hAi : ¬∃ i : I, (∃ j : J, A i j = ⊥) ∧ (∃ j : J, A i j = ⊤))
- (hAj : ¬∃ j : J, (∃ i : I, A i j = ⊥) ∧ (∃ i : I, A i j = ⊤))
- (hAb : ¬∃ i : I, (∃ j : J, A i j = ⊤) ∧ b i = ⊤)
- (hbA : ¬∃ i : I, (∃ j : J, A i j = ⊥) ∧ b i = ⊥) :
- (∃ x : J → F≥0, A ₘ* x ≤ b) ≠ (∃ y : I → F≥0, -Aᵀ ₘ* y ≤ 0 ∧ b ᵥ⬝ y < 0)
-\end{lstlisting}
-\begin{lstlisting}
-theorem ExtendedLP.strongDuality {P : ExtendedLP I J F} (hP : P.IsFeasible ∨ P.dualize.IsFeasible) :
- OppositesOpt P.optimum P.dualize.optimum
-\end{lstlisting}
-
-
-\section{Proving the Farkas-Bartl theorem}
-\label{bartl}
-
-We prove \ref{finFarkasBartl} and, in the end, we obtain \ref{fintypeFarkasBartl} as a corollary.
-
-\begin{theorem}[\customlabel{finFarkasBartl}{\texttt{finFarkasBartl}}]
-Let $n$ be a natural number.
-Let $R$ be a linearly ordered division ring.
-Let $W$ be an $R$-module.
-Let $V$ be a linearly ordered $R$-module whose ordering satisfies
-monotonicity of scalar multiplication by nonnegative elements on the left.
-Let $A$ be an $R$-linear map from $W$ to $(\fin{n} \r R)$.
-Let $b$ be an $R$-linear map from $W$ to $V$.
-Exactly one of the following exists:
-\begin{itemize}
-\item nonnegative vector family $x : \fin{n} \r V$ such that, for all $w : W$, we have
-$ \sum_{j : \fin{n}}\; (A~w)_j \cdot x_j = b~w $
-\item vector $y : W$ such that $A~y \ge 0$ and $b~y < 0$
-\end{itemize}
-\end{theorem}
-The only difference from \ref{fintypeFarkasBartl} is that
-\ref{finFarkasBartl} uses $\fin{n} = \{ 0, \dots, n\!-\!1 \}$
-instead of an arbitrary (unordered) finite type $J$.
-
-\medskip \noindent
-Proof idea:
-We first prove that both cannot exist at the same time.
-Assume we have $x$ and $y$ with said properties.
-We plug in $y$ for $w$ and obtain
-$ \sum_{j : \fin{n}}\; (A~y)_j \cdot x_j = b~y $.
-On the left-hand side, we have a sum of nonnegative elements of $V$,
-hence a nonnegative value, which contradicts $b~y < 0$.
-\smallskip
-
-We prove ``at least one exists'' by induction on $n$.
-If $n=0$ then $A~y \ge 0$ is a tautology.
-We consider $b$. Either $b$ maps everything to the
-zero vector, which allows $x$ to be the empty vector family,
-or some $w$ gets mapped to a nonzero vector, where
-we choose $y$ to be either $w$ or $(-w)$.
-Since $V$ is linearly ordered, one of them satisfies $b~y<0$.
-We now state the induction step precisely.
-
-\begin{lemma}[\customlabel{industepFarkasBartl}{\texttt{industepFarkasBartl}}]
-Let $m$ be a natural number.
-Let $R$ be a linearly ordered division ring.
-Let $W$ be an $R$-module.
-Let $V$ be a linearly ordered $R$-module whose ordering satisfies
-monotonicity of scalar multiplication by nonnegative elements on the left.
-Assume (induction hypothesis) that
-for all $R$-linear maps $\bar{A} : W \r (\fin{m} \r R)$
-and $\bar{b} : W \r V$, the formula
-``$\forall \bar{y} : W \st \bar{A}~\bar{y} \ge 0 \implies \bar{b}~\bar{y} \ge 0$''
-implies existence of a nonnegative vector family $\bar{x} : \fin{m} \r V$ such that,
-for all $\bar{w} : W$, $ \sum_{i : \fin{m}}\; (\bar{A}~\bar{w})_i \cdot \bar{x}_i = \bar{b}~\bar{w} $.
-Let $A$ be an $R$-linear map from $W$ to $(\fin{m\!+\!1} \r R)$.
-Let $b$ be an $R$-linear map from $W$ to~$V$.
-Assume that, for all $y : W$, $\;A~y \ge 0$ implies $b~y \ge 0$.
-We claim there exists a nonnegative vector family $x : \fin{m\!+\!1} \r V$
-such that, for all $w : W$, we have
-$ \sum_{i : \fin{m+1}}\; (A~w)_i \cdot x_i = b~w $.
-\end{lemma}
-
-\noindent
-Proof idea:
-If $z > 0$, we finish the proof using $p := z^{-1} \* c \* x$
-and $q := z^{-1} \* b \* y$ (and we do not care that this case
-cannot happen in reality).
-It remains to prove that $z=0$ cannot happen.
-At least one of $b \* y$ or $c \* x$ must be strictly negative.
-If $c \* x < 0$, we apply \ref{unbounded-of-feasible-of-neg} to
-$P$, which (using \ref{infeasible-of-unbounded}) contradicts
-that the dual of $P$ is feasible.
-If $b \* y < 0$, we apply \ref{unbounded-of-feasible-of-neg} to
-the dual of $P$, which (using \ref{infeasible-of-unbounded} and
-\ref{dualize-dualize}) contradicts that $P$ is feasible.
-
-Lemma \ref{no-bot-of-feasible} is used 12 times throughout this proof.
-
-\begin{lemma}[\customlabel{strongDuality-of-both-feasible}{\texttt{strongDuality\_of\_both\_feasible}}]
-Let $P$ be a valid linear program such that
-$P$ is feasible and the dual of $P$ is also feasible.
-There is a finite value $r$ such that
-$P$ reaches $-r$ and the dual of $P$ reaches $r$.
-\end{lemma}
-
-\noindent
-Proof idea:
-From \ref{strongDuality-aux} we have a value $p$ reached by $P$ and
-a value $q$ reached by the dual of $P$ such that $p + q \le 0$.
-We apply \ref{weakDuality} to $p$ and $q$ to obtain $p + q \ge 0$.
-Hence $p = -q$, and we set $r := q$.
-
-\begin{lemma}[\customlabel{optimum-unique}{\texttt{optimum\_unique}}]
-Let $P$ be a valid linear program.
-Let $r$ be a value reached by $P$ such that $P$ is bounded by $r$.
-Let $s$ be a value reached by $P$ such that $P$ is bounded by $s$.
-Then $r = s$.
-\end{lemma}
-
-\noindent
-Proof idea:
-We prove $r \le s$ by applying ``$P$ is bounded by $r$'' to
-``$s$ is reached by $P$''.
-We prove $s \le r$ by applying ``$P$ is bounded by~$s$'' to
-``$r$ is reached by $P$''.
-
-\begin{lemma}[\customlabel{optimum-eq-of-reaches-bounded}{\texttt{optimum\_eq\_of\_reaches\_bounded}}]
-Let $P$ be a valid linear program.
-Let $r$ be a value reached by $P$ such that $P$ is bounded by $r$.
-Then the optimum of $P$ is $r$.
-\end{lemma}
-
-\noindent
-Proof idea:
-Apply the axiom of choice to the definition of optimum and use \ref{optimum-unique}.
-
-\begin{lemma}[\customlabel{strongDuality-of-prim-feas}{\texttt{strongDuality\_of\_prim\_feas}}]
-Let $P$ be a valid linear program that is feasible.
-Then the optimum of $P$ and the optimum of the dual of $P$ are opposites.
-\end{lemma}
-
-\noindent
-Proof idea:
-If the dual of $P$ is feasible as well, use \ref{strongDuality-of-both-feasible}
-to obtain $r$ such that $P$ reaches $-r$ and the dual of $P$ reaches $r$.
-Using \ref{optimum-eq-of-reaches-bounded} together with \ref{weakDuality},
-conclude that the optimum of $P$ is $-r$.
-Using \ref{optimum-eq-of-reaches-bounded} together with \ref{weakDuality},
-conclude that the optimum of the dual of $P$ is $r$.
-Observe that $-r$ and $r$ are opposites.
-
-If the dual of $P$ is infeasible, the optimum of the dual of $P$ is
-$\top$ by definition. Using \ref{unbounded-of-feasible-of-infeasible}
-we get that the optimum of $P$ is $\bot$.
-Observe that $\top$ and $\bot$ are opposites.
-
-\begin{theorem}[\customlabel{optimum-neq-none}{\texttt{optimum\_neq\_none}}]
-Every valid linear program has an optimum.
-\end{theorem}
-
-\noindent
-Proof idea:
-If a valid linear program $P$ is feasible, the existence of its optimum
-follows from \ref{strongDuality-of-prim-feas}.
-Otherwise, the optimum of $P$ is $\top$ by definition.
-
-\begin{lemma}[\customlabel{strongDuality-of-dual-feas}{\texttt{strongDuality\_of\_dual\_feas}}]
-Let $P$ be a valid linear program whose dual is feasible.
-Then the optimum of $P$ and the optimum of the dual of $P$
-are opposites.
-\end{lemma}
-
-\noindent
-Proof idea:
-Apply \ref{strongDuality-of-prim-feas} to the dual of $P$ and
-use \ref{dualize-dualize}.
-
-\begin{theorem}[\customlabelrestated{strongDuality:restated}{\texttt{strongDuality}}]
-Let $F$ be a linearly ordered field.
-Let $P$ be a valid linear program over $F_\infty$.
-If $P$ or its dual is feasible (at least one of them),
-then the optimum of $P$ and the optimum of the dual of $P$
-are opposites.
-\end{theorem}
-
-\noindent
-Proof idea:
-Use \ref{strongDuality-of-prim-feas} or \ref{strongDuality-of-dual-feas}.
-
-
-\section{Counterexamples}
-\label{counterexamples}
-
-\subsection{Counterexamples for~\ref{extendedFarkas}}
-\label{sec:extendedFarkas:counterexamples}
-
-Recall that~\ref{extendedFarkas} has four preconditions on matrix $A$ and vector $b$.
-The following examples show that omitting any of these preconditions makes the theorem
-false\:---\:there may exist nonnegative vectors $x,y$ such that
-$A \* x \le b$, $(-A^T) \* y \le 0$, and $b \* y < 0$.
-
-$$
-\begin{array}{cc@{\hspace{30pt}}ccc@{\hspace{60pt}}l}
-A =
-\begin{pmatrix}
- \bot & \top \\
- 0 & -1
-\end{pmatrix}
-&
-b = \begin{pmatrix} 0 \\ -1 \end{pmatrix}
-&
-x = \begin{pmatrix} 1 \\ 1 \end{pmatrix}
-&
-y = \begin{pmatrix} 0 \\ 1 \end{pmatrix}
-&
-&\mbox{ $A$ has $\bot$ and $\top$ in the same row}
-%
-\vspace{5pt} \\
-%
-A = \begin{pmatrix} \bot \\ \top \end{pmatrix}
-&
-b = \begin{pmatrix} -1 \\ 0 \end{pmatrix}
-&
-x = \begin{pmatrix} 0 \end{pmatrix}
-&
-y = \begin{pmatrix} 1 \\ 1 \end{pmatrix}
-&
-&\mbox{ $A$ has $\bot$ and $\top$ in the same column}
-%
-\vspace{5pt} \\
-%
-A = \begin{pmatrix} \top \\ -1 \end{pmatrix}
-&
-b = \begin{pmatrix} \top \\ -1 \end{pmatrix}
-&
-x = \begin{pmatrix} 1 \end{pmatrix}
-&
-y = \begin{pmatrix} 0 \\ 1 \end{pmatrix}
-&
-&\mbox{ $A$ has $\top$ in a row where $b$ has $\top$}
-%
-\vspace{5pt} \\
-%
-A = \begin{pmatrix} \bot \end{pmatrix}
-&
-b = \begin{pmatrix} \bot \end{pmatrix}
-&
-x = \begin{pmatrix} 1 \end{pmatrix}
-&
-y = \begin{pmatrix} 0 \end{pmatrix}
-&
-&\mbox{ $A$ has $\bot$ in a row where $b$ has $\bot$}
-\end{array}
-$$
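-As a sanity check, let us verify the first line (the other three are analogous);
-recall that the weights act on the entries from the left:
-$$
-A \* x = \begin{pmatrix} 1\cdot\bot + 1\cdot\top \\ 1\cdot 0 + 1\cdot(-1) \end{pmatrix}
-       = \begin{pmatrix} \bot \\ -1 \end{pmatrix} \le b ,
-\qquad
-(-A^T) \* y = \begin{pmatrix} 0\cdot\top + 1\cdot 0 \\ 0\cdot\bot + 1\cdot 1 \end{pmatrix}
-            = \begin{pmatrix} 0 \\ \bot \end{pmatrix} \le 0 ,
-\qquad
-b \* y = 0\cdot 0 + 1\cdot(-1) = -1 < 0 .
-$$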
-
-We also claimed in \sekt{extensions} that changing condition $(-A^T) \* y \le 0$
-to $A^T \* y \ge 0$ in \ref{extendedFarkas} would cause the theorem
-to fail even when $A$ has a single $\bot$ entry. The counterexample is as follows:
-$$
-\begin{array}{cc}
-A =
-\begin{pmatrix}
- \bot \\
- 0
-\end{pmatrix}
-&
-b = \begin{pmatrix} 0 \\ -1 \end{pmatrix}
-\end{array}
-$$
-System $A \* x\le b$ does not have a solution, and neither does system
-$A^T \* y \ge 0$, $b \* y < 0$ (over nonnegative vectors $x,y$).
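-Indeed, for any nonnegative $x$ and $y$:
-the second row of $A \* x \le b$ would require $x_1 \cdot 0 = 0 \le -1$, which is impossible,
-and $A^T \* y = y_1 \cdot \bot + y_2 \cdot 0 = \bot$ (recall $0 \cdot \bot = \bot$), which is never $\ge 0$.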
-
-\subsection{Counterexamples for~\ref{ExtendedLP.strongDuality}}\label{sec:ExtendedLP.strongDuality:counterexamples}
-Recall that \ref{ExtendedLP.strongDuality} is formulated for {\em valid} LPs,
-and the definition of a valid LP has six conditions.
-In this section we show that omitting any of these six conditions
-makes the theorem false.
-
-Let us consider the following six LPs over $F_\infty$;
-all of them are written in the format $P=(A,b,c)$.
-$$
-\begin{array}{c@{\hspace{100pt}}c}
-P_1 = \left(
- \begin{pmatrix} \bot \\ \top \end{pmatrix},
- \begin{pmatrix} -1 \\ 0 \end{pmatrix},
- \begin{pmatrix} 0 \end{pmatrix}
-\right)
-&
-D_1 = \left(
- \begin{pmatrix} \top & \bot \end{pmatrix},
- \begin{pmatrix} 0 \end{pmatrix},
- \begin{pmatrix} -1 \\ 0 \end{pmatrix}
-\right)
-\vspace{5pt} \\
-P_2=\left(
- \begin{pmatrix} \bot \end{pmatrix},
- \begin{pmatrix} \bot \end{pmatrix},
- \begin{pmatrix} 0 \end{pmatrix}
-\right)
-&
-D_2=\left(
- \begin{pmatrix} \top \end{pmatrix},
- \begin{pmatrix} 0 \end{pmatrix},
- \begin{pmatrix} \bot \end{pmatrix}
-\right)
-\vspace{5pt} \\
-P_3=\left(
- \begin{pmatrix} \top \\ -1 \end{pmatrix},
- \begin{pmatrix} \top \\ -1 \end{pmatrix},
- \begin{pmatrix} 0 \end{pmatrix}
-\right)
-&
-D_3=\left(
- \begin{pmatrix} \bot & 1 \end{pmatrix},
- \begin{pmatrix} 0 \end{pmatrix},
- \begin{pmatrix} \top \\ -1 \end{pmatrix}
-\right)
-\end{array}
-$$
-It can be checked that $D_i$ is the dual of $P_i$ for $i=1,2,3$.
-Furthermore, strong duality fails in all cases, since
-the optimum of $P_i$ is $0$ and the optimum of $D_i$ is $\bot$ for $i=1,2,3$.
-Each of $P_1,D_1,P_2,D_2,P_3,D_3$ violates exactly one of the conditions
-in the definition of a valid LP.
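-
-For instance, consider the pair $(P_2, D_2)$.
-Every nonnegative $x$ satisfies $x_1 \cdot \bot = \bot \le \bot$ and yields the objective value $x_1 \cdot 0 = 0$,
-so $P_2^\star = 0$.
-On the other hand, $y_1 = 0$ is the only solution of $D_2$ (for $y_1 > 0$ the constraint $y_1 \cdot \top \le 0$ fails),
-and it yields the objective value $0 \cdot \bot = \bot$, so $D_2$ is unbounded and $D_2^\star = \bot$;
-since $-\bot = \top \ne 0$, the optima are not opposites.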
-
-
-\section{Related work}
-
-Related formalizations exist in Isabelle \cite{Farkas-AFP,Linear-AFP,Duality-AFP}
-and in Rocq \cite{Simplex-Coq}.
-
-In Lean, it would be possible to prove Farkas for reals using the
-Hahn-Banach separation theorem. However, we do not yet know that
-the set of feasible solutions is closed.
-
-TODO more about existing formalizations of Farkas-like theorems.
-
-
-\section{Conclusion}
-
-We formally verified several Farkas-like theorems in Lean 4.
-We extended the existing theory to a new setting where some
-coefficients can carry infinite values. We found that the
-abstract work with modules over linearly ordered division rings
-and linear maps between them was fairly easy to carry out in
-Lean 4, thanks to the Mathlib library, which is well suited
-to such tasks. In contrast, manipulation of matrices became
-tiresome whenever we needed a not-fully-standard operation.
-It turns out that Lean 4 cannot automate case analyses unless they
-take place in the ``outer layers'' of formulas. Summation
-over subtypes and summation of conditional expressions made
-us develop a lot of ad-hoc machinery that we would have
-preferred to be handled by existing tactics. Another area
-where Lean 4 is not yet helpful is the search for counterexamples.
-Despite these difficulties, we find Lean 4 to be an excellent
-tool for elegantly expressing mathematical theorems and
-for proving them formally.
-
-
-
-\bibliographystyle{plain}
-\bibliography{duality}
-
-
-%\section{Appendix: algebraic hierarchy up to LinearOrderedField in Mathlib}
-%
-%\includegraphics[width=\textwidth]{algebraic_hierarchy.png}
-
-\section{Appendix: dependencies between theorems}
-
-\includegraphics[width=0.9\textwidth]{theorems.png}
-
-Theorems are shown in black; selected lemmas are in gray.
-The theorems that we consider to be our main results
-are marked with a yellow background.
-
-TODO: mark extendedFarkas with a yellow background as well.
-{\color{blue} TODO: some names and some arrows have changed.}
-
-
-\end{document}
diff --git a/nonLean/theorems.dot b/nonLean/theorems.dot
deleted file mode 100644
index 5a1f93a..0000000
--- a/nonLean/theorems.dot
+++ /dev/null
@@ -1,45 +0,0 @@
-digraph G {
-
- finFarkasBartl -> fintypeFarkasBartl -> scalarFarkas -> coordinateFarkas -> equalityFarkas -> basicLinearAlgebra_lt -> basicLinearAlgebra
-
- equalityFarkas -> inequalityFarkas -> inequalityFarkas_neg -> extendedFarkas -> "ExtendedLP.weakDuality"
-
- "ExtendedLP.strongDuality_aux" -> "ExtendedLP.strongDuality_of_both_feasible" -> "ExtendedLP.strongDuality_of_prim_feasible"
-
- extendedFarkas -> "ExtendedLP.strongDuality_aux"
-
- extendedFarkas -> "ExtendedLP.unbounded_of_feasible_of_infeasible"
-
- "ExtendedLP.weakDuality" -> "ExtendedLP.infeasible_of_unbounded" -> "ExtendedLP.strongDuality_aux"
-
- "ExtendedLP.weakDuality" -> "ExtendedLP.strongDuality_of_both_feasible"
-
- "ExtendedLP.weakDuality" -> "ExtendedLP.strongDuality_of_prim_feasible" -> "ExtendedLP.strongDuality_of_dual_feasible" -> "ExtendedLP.strongDuality"
-
- "ExtendedLP.strongDuality_of_prim_feasible" -> "ExtendedLP.strongDuality" -> "StandardLP.strongDuality"
-
- "ExtendedLP.unbounded_of_feasible_of_infeasible" -> "ExtendedLP.strongDuality_of_prim_feasible" -> "ExtendedLP.optimum_neq_none" -> "StandardLP.optimum_neq_none"
-
- "ExtendedLP.unbounded_of_feasible_of_neg" -> "ExtendedLP.strongDuality_aux"
-
- "ExtendedLP.unbounded_of_feasible_of_neg" -> "ExtendedLP.unbounded_of_feasible_of_infeasible"
-
- "fintypeFarkasBartl" [style=filled, fillcolor=yellow]
-
- "ExtendedLP.strongDuality" [style=filled, fillcolor=yellow]
-
- "ExtendedLP.infeasible_of_unbounded" [color=grey, fontcolor=grey]
-
- "ExtendedLP.strongDuality_aux" [color=grey, fontcolor=grey]
-
- "ExtendedLP.strongDuality_of_both_feasible" [color=grey, fontcolor=grey]
-
- "ExtendedLP.strongDuality_of_prim_feasible" [color=grey, fontcolor=grey]
-
- "ExtendedLP.strongDuality_of_dual_feasible" [color=grey, fontcolor=grey]
-
- "ExtendedLP.unbounded_of_feasible_of_neg" [color=grey, fontcolor=grey]
-
- "ExtendedLP.unbounded_of_feasible_of_infeasible" [color=grey, fontcolor=grey]
-
-}
\ No newline at end of file
diff --git a/nonLean/theorems.png b/nonLean/theorems.png
deleted file mode 100644
index 54e104a..0000000
Binary files a/nonLean/theorems.png and /dev/null differ