diff --git a/.gitignore b/.gitignore
index 5fcd3b3..585128b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,8 +1,15 @@
 .DS_Store/
 .DS_Store
 *.cnf
+*.icnf
 .direnv/
 utils/HoleComputation.js
+*.drat
+*.lrat
+cube_checking/cube_gen
+
+## The Nix flake uses the .venv directory to set up the Python environment
+.venv
 
 ## Core latex/pdflatex auxiliary files:
 *.aux
diff --git a/utils/6hole.c b/Heule_Scheucher.c
similarity index 98%
rename from utils/6hole.c
rename to Heule_Scheucher.c
index 907bb9d..8eb4023 100644
--- a/utils/6hole.c
+++ b/Heule_Scheucher.c
@@ -1,3 +1,8 @@
+/*
+ * Encoding used by Heule and Scheucher.
+ * We sourced this C file directly from them.
+ */
+
 #include 
 #include 
 #include 
diff --git a/README.md b/README.md
index cf87965..53c098b 100644
--- a/README.md
+++ b/README.md
@@ -1,12 +1,14 @@
 # EmptyHexagonLean
 A Lean verification of the _Empty Hexagon Number_, obtained by a [recent breakthrough of Heule and Scheucher](https://arxiv.org/abs/2403.00737).
-The main theorem is that every finite set of 30 or more points must contain a convex and empty 6-gon. The proof is based on generating a CNF formula whose unsatisfiability is formally proved to imply the theorem. This repository contains the formalization code, and it allows the generation of the associated CNF.
+The main theorem is that every finite set of 30 or more points in general position must contain a convex, empty 6-gon. The proof is based on generating a CNF formula whose unsatisfiability is formally proved to imply the theorem. This repository contains the formalization code, and it allows the generation of the associated CNF.
 
 ## Installing Lean
 
-See the [quickstart](https://lean-lang.org/lean4/doc/quickstart.html) guide from the Lean manual,
+The main part of this repository is a mathematical formalization written in the Lean 4 theorem prover.
+To install Lean on your machine,
+see the [quickstart](https://lean-lang.org/lean4/doc/quickstart.html) guide from the Lean manual,
 or [the extended setup instructions](https://lean-lang.org/lean4/doc/setup.html).
@@ -34,17 +36,17 @@ Overall structure:
 
 ## Generating CNFs
 
-The project includes scripts for generating the CNFs we formalized against.
+The project includes scripts for generating the CNFs we formalized against. These must be run inside the `Lean/` subfolder.
 
 #### Convex 6-gon
-For `n` points:
+For `n` points, in the `Lean/` directory, run:
 ```
 lake exe encode gon 6 <n>
 ```
 With `n = 17` this CNF can be solved in under a minute on most machines.
 
 #### Convex `k`-hole
-For convex `k`-hole in `n` points:
+For a convex `k`-hole in `n` points, in the `Lean/` directory, run:
 ```
 lake exe encode hole <k> <n>
 ```
@@ -54,9 +56,78 @@
 the CNFs produced can be shown UNSAT instantly.
 For `k = 6`, `n = 30`, the CNF takes on the order of
 17,000 CPU hours to show UNSAT when parallelized using cube&conquer.
 
-The script in `utils/6hole-cube.c` outputs the list of cubes we used to solve `k = 6`, `n = 30`.
-We will add scripts soon for reproducing the entire parallel computation.
+## Cube and Conquer verification
 
+In order to verify that the resulting formula $F$ is indeed UNSAT, two things must be checked:
+
+ 1) **(Tautology proof)** The formula $F$ is UNSAT if and only if $F \land C_i$ is UNSAT for every cube $C_i$.
+ 2) **(UNSAT proof)** For each cube $C_i$, the formula $F \land C_i$ is UNSAT.
+
+This repository contains scripts that allow you to check these two proofs.
+
+The **tautology proof** is implied by checking that $F \land (\bigwedge_i \neg C_i)$ is UNSAT (i.e., that $F$ implies $\bigvee_i C_i$), as per Section 7.3 of [Heule and Scheucher](https://arxiv.org/abs/2403.00737).
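+Concretely, if $C_i = \ell_1 \land \dots \land \ell_m$, then $\neg C_i$ is the clause $(\neg\ell_1 \lor \dots \lor \neg\ell_m)$, so the check amounts to appending the negation of every cube to $F$ as a clause; this is what `cube_join.py --tautcheck` below does. A minimal sketch of the construction, with illustrative names:
+```
+def append_negated_cubes(cnf_clauses, cubes):
+    # Each cube is a list of literals; its negation is a single clause.
+    return cnf_clauses + [[-lit for lit in cube] for cube in cubes]
+```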
+
+As a first step, install the required dependencies:
+1) Clone and compile the SAT solver `cadical` from [its GitHub repository](https://github.com/arminbiere/cadical). Add the `cadical` executable to your `PATH`.
+2) Clone and compile the verified proof checker `cake_lpr` from [its GitHub repository](https://github.com/tanyongkiam/cake_lpr). Add the `cake_lpr` executable to your `PATH`.
+3) Install the `eznf` and `lark` Python libraries with `pip install eznf` and `pip install lark`.
+
+Next, generate the main CNF by running:
+```
+cd Lean
+lake exe encode hole 6 30 ../cube_checking/vars.out > ../cube_checking/6-30.cnf
+```
+
+The **tautology proof** can be verified by navigating to the `cube_checking/` directory and running
+```
+./verify_tautology.sh
+```
+which boils down to:
+```
+gcc 6hole-cube.c -o cube_gen
+./cube_gen 0 vars.out > cubes.icnf
+python3 cube_join.py -f 6-30.cnf -c cubes.icnf --tautcheck -o taut_if_unsat.cnf
+cadical taut_if_unsat.cnf tautology_proof.lrat --lrat # this should take around 30 seconds
+cake_lpr taut_if_unsat.cnf tautology_proof.lrat
+```
+If proof checking succeeds, you should see the following output:
+
+```
+s VERIFIED UNSAT
+```
+
+If you do not have the `cadical` and `cake_lpr` executables on your `PATH`, you can instead pass their locations to the script as:
+```
+env CADICAL=<path to cadical> CAKELPR=<path to cake_lpr> ./verify_tautology.sh
+```
+
+Because verifying the **UNSAT proof** requires many thousands of CPU hours,
+we provide a simple way to verify that any single cube is UNSAT.
+To generate and check the formula conjoined with cube number $i$ (assuming the steps for the **tautology proof** have been followed), run the `./solve_cube.sh` script in the `cube_checking/` directory, which essentially runs:
+```
+./cube_gen <i> vars.out > .tmp_cube_<i>
+python3 cube_join.py -f 6-30.cnf -c .tmp_cube_<i> -o with_cube_<i>.cnf
+cadical with_cube_<i>.cnf proof_cube_<i>.lrat --lrat # this should take a few seconds
+cake_lpr with_cube_<i>.cnf proof_cube_<i>.lrat
+```
+For example, to solve cube number 42, run:
+
+```
+./solve_cube.sh 42
+```
+The cubes are indexed from 1 to 312418.
+
+We **DO NOT RECOMMEND** that the casual verifier run all cubes,
+as doing so would take many thousands of CPU hours.
+Our verification was completed in a semi-efficient manner using parallel computation,
+which required some custom scripting.
+However, the skeptical reviewer with *lots of computation to spare* can replicate our results.
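+One possible shape for such scripting, as a hypothetical sketch (it assumes `solve_cube.sh` is adapted to propagate a nonzero exit code on failure, and is not the exact tooling we used):
+```
+import subprocess
+from multiprocessing import Pool
+
+def solve_cube(i):
+    # Run one cube check per worker; cubes are indexed from 1 to 312418.
+    return i, subprocess.run(["./solve_cube.sh", str(i)]).returncode
+
+if __name__ == "__main__":
+    with Pool() as pool:
+        for i, code in pool.imap_unordered(solve_cube, range(1, 312419)):
+            print(f"cube {i}: {'ok' if code == 0 else 'failed'}")
+```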
+To generate the formula and all cubes for incremental solving, run:
+```
+./cube_gen 0 vars.out > cubes.icnf
+python3 cube_join.py -f 6-30.cnf -c cubes.icnf --inccnf -o full_computation.icnf
+cadical full_computation.icnf # this should take a while :P
+```
 
 ## Authors
 - [Bernardo Subercaseaux](https://bsubercaseaux.github.io/) (Carnegie Mellon University)
diff --git a/utils/6hole-cube.c b/cube_checking/6hole-cube.c
similarity index 100%
rename from utils/6hole-cube.c
rename to cube_checking/6hole-cube.c
diff --git a/cube_checking/cube_join.py b/cube_checking/cube_join.py
new file mode 100644
index 0000000..44b72e7
--- /dev/null
+++ b/cube_checking/cube_join.py
@@ -0,0 +1,77 @@
+from eznf import modeler
+import argparse
+
+argparser = argparse.ArgumentParser()
+argparser.add_argument(
+    "-f", "--formula", required=True, type=str, help="CNF formula file"
+)
+argparser.add_argument("-c", "--cubes", required=True, type=str, help="Cube file")
+argparser.add_argument("-o", "--output", required=False, type=str, help="Output file")
+argparser.add_argument(
+    "--tautcheck", action="store_true", help="Checks for a tautology"
+)
+argparser.add_argument(
+    "--inccnf", action="store_true", help="Creates an .icnf file with the formula + cubes"
+)
+
+args = argparser.parse_args()
+
+formula_file = args.formula
+cube_file = args.cubes
+output_file = args.output
+tautcheck = args.tautcheck
+icnf = args.inccnf
+
+encoding = modeler.Modeler()
+
+def itokenize(line):
+    """
+    Takes a line as a string, coming from either a `p cnf` or a `p inccnf` file.
+    Returns an array of ints without the terminating 0.
+    Ignores lines starting with c or p.
+    """
+    if len(line) == 0:
+        return []
+    if line[0] == "c" or line[0] == "p":
+        return []
+    if line[-1] == "\n":
+        line = line[:-1]
+    tokens = line.split(" ")
+    ans = []
+    for t in tokens:
+        try:
+            t = int(t)
+            if t != 0:
+                ans.append(t)
+        except ValueError:
+            continue
+    return ans
+
+
+with open(formula_file, 'r', encoding='utf-8') as sb_file:
+    for line in sb_file.readlines():
+        tkns = itokenize(line)
+        if len(tkns) > 0:
+            encoding.add_clause(tkns)
+
+
+cubes = []
+with open(cube_file, 'r', encoding='utf-8') as cb_file:
+    for line in cb_file.readlines():
+        tkns = itokenize(line)
+        if len(tkns) == 0:
+            continue
+        if tautcheck:
+            # negating a cube yields a clause; adding all of them encodes the tautology check
+            encoding.add_clause([-t for t in tkns])
+        else:
+            cubes.append(tkns)
+
+if tautcheck:
+    encoding.serialize(output_file)
+elif icnf:
+    encoding.cube_and_conquer(lambda: cubes, output_file)
+else:
+    # joining with a single cube: add each of its literals as a unit clause
+    assert len(cubes) == 1
+    cube = cubes[0]
+    for lit in cube:
+        encoding.add_clause([lit])
+    encoding.serialize(output_file)
diff --git a/cube_checking/solve_cube.sh b/cube_checking/solve_cube.sh
new file mode 100755
index 0000000..11eb441
--- /dev/null
+++ b/cube_checking/solve_cube.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+# Receives the desired cube number as $1
+gcc 6hole-cube.c -o cube_gen
+./cube_gen $1 vars.out > .tmp_cube_$1
+python3 cube_join.py -f 6-30.cnf -c .tmp_cube_$1 -o with_cube_$1.cnf
+${CADICAL:-cadical} with_cube_$1.cnf proof_cube_$1.lrat --lrat
+${CAKELPR:-cake_lpr} with_cube_$1.cnf proof_cube_$1.lrat
+
+rm with_cube_$1.cnf proof_cube_$1.lrat .tmp_cube_$1
diff --git a/cube_checking/verify_tautology.sh b/cube_checking/verify_tautology.sh
new file mode 100755
index 0000000..395b0b8
--- /dev/null
+++ b/cube_checking/verify_tautology.sh
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+gcc 6hole-cube.c -o cube_gen
+./cube_gen 0 vars.out > cubes.icnf
+python3 cube_join.py -f 6-30.cnf -c cubes.icnf --tautcheck -o taut_if_unsat.cnf
+${CADICAL:-cadical} taut_if_unsat.cnf tautology_proof.lrat --lrat
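+# cake_lpr is the formally verified checker: it re-validates the LRAT proof emitted by cadical above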
+${CAKELPR:-cake_lpr} taut_if_unsat.cnf tautology_proof.lrat
diff --git a/flake.nix b/flake.nix
index d4d1112..cd8760d 100644
--- a/flake.nix
+++ b/flake.nix
@@ -14,6 +14,8 @@
             texlive.combined.scheme-full
             texlab
             fontconfig
+            gcc
+            python3
           ];
           FONTCONFIG_FILE = pkgs.makeFontsConf {
             fontDirectories = [
@@ -23,6 +25,18 @@
               pkgs.iosevka
             ];
           };
+
+          # Create a Python virtual environment with eznf and lark
+          venvDir = "./.venv";
+          buildInputs = with pkgs.python3Packages; [
+            python
+            venvShellHook
+          ];
+
+          postVenvCreation = ''
+            unset SOURCE_DATE_EPOCH
+            pip install eznf lark
+          '';
         };
       });
 }
diff --git a/utils/HoleComputation.ts b/utils/HoleComputation.ts
deleted file mode 100644
index d19bdc0..0000000
--- a/utils/HoleComputation.ts
+++ /dev/null
@@ -1,111 +0,0 @@
-export { };
-const points = [
-    [1, 1260],
-    [16, 743],
-    [22, 531],
-    [37, 0],
-    [306, 592],
-    [310, 531],
-    [366, 552],
-    [371, 487],
-    [374, 525],
-    [392, 575],
-    [396, 613],
-    [410, 539],
-    [416, 550],
-    [426, 526],
-    [434, 552],
-    [436, 535],
-    [446, 565],
-    [449, 518],
-    [450, 498],
-    [453, 542],
-    [458, 526],
-    [489, 537],
-    [492, 502],
-    [496, 579],
-    [516, 467],
-    [552, 502],
-    [754, 697],
-    [777, 194],
-    [1259, 320],
-];
-const r = 6;
-
-type point = { x: number, y: number };
-const allPoints: point[] = points.map(p => ({ x: p[0], y: p[1] }));
-for (let idP = 0; idP < allPoints.length; idP++) {
-    const referencePoint = allPoints[idP];
-
-    const ccw = (a: point, b: point, c: point) =>
-        (b.x - a.x) * (c.y - a.y) - (c.x - a.x) * (b.y - a.y) > 0;
-
-    // computeStarPolygon
-    // first remove points to the left of referencePoint.
-    const filteredPoints = allPoints.filter(p => p.x > referencePoint.x);
-    // now sort by angle with respect to referencePoint.
-    const sortedPoints = filteredPoints.sort((p1, p2) => {
-        let angleP1 = Math.atan2(p1.y - referencePoint.y, p1.x - referencePoint.x);
-        let angleP2 = Math.atan2(p2.y - referencePoint.y, p2.x - referencePoint.x);
-        return angleP1 - angleP2;
-    });
-    const pointSequence = [referencePoint, ...sortedPoints];
-
-    // computeVisibilityGraph
-    let edges: number[][] = [];
-    let queues: number[][] = pointSequence.map(() => []);
-    for (let i = 0; i < pointSequence.length - 1; i++) {
-        const proceed = function (i: number, j: number) {
-            let Q_i = queues[i];
-            let Q_j = queues[j];
-            // is Q_i[0] -> i -> j ccw?
-
-            while (Q_i.length > 0) {
-                let ccw_ij = ccw(
-                    pointSequence[Q_i[0]],
-                    pointSequence[i],
-                    pointSequence[j]);
-                if (!ccw_ij) {
-                    break;
-                }
-                proceed(Q_i[0], j);
-                Q_i.shift();
-            }
-            // discard edges including point p.
-            if (i != 0) edges.push([i, j]);
-            Q_j.push(i);
-        }
-        proceed(i, i + 1);
-    }
-
-    // maxChain
-    let Lmap: { [index: string]: number[] } = {};
-    for (let i = pointSequence.length - 1; i >= 0; --i) {
-        let pIncoming = edges.filter(edge => edge[1] === i);
-        let pOutgoing = edges.filter(edge => edge[0] === i);
-        let l = pOutgoing.length - 1;
-        let m: number[] = [i];
-        for (let j = pIncoming.length - 1; j >= 0; --j) {
-            Lmap[String(pIncoming[j])] = [pIncoming[j][0], ...m];
-            while (l >= 0) {
-                let ccw_jl = ccw(
-                    pointSequence[pIncoming[j][0]],
-                    pointSequence[pIncoming[j][1]],
-                    pointSequence[pOutgoing[l][1]]);
-                if (!ccw_jl) {
-                    break;
-                }
-                if (Lmap[String(pOutgoing[l])].length > m.length) {
-                    m = Lmap[String(pOutgoing[l])];
-                    const n = [pIncoming[j][0], ...m];
-                    Lmap[String(pIncoming[j])] = n;
-                    if (m.length >= r - 2) {
-                        console.log(`Found empty convex polygon: ${idP}:${n}`);
-                    }
-                }
-                l -= 1;
-            }
-        }
-    }
-    // console.log(Lmap)
-}
diff --git a/utils/cubing_taut.py b/utils/cubing_taut.py
deleted file mode 100644
index 0f58863..0000000
--- a/utils/cubing_taut.py
+++ /dev/null
@@ -1,41 +0,0 @@
-from eznf import modeler
-import argparse
-
-argparser = argparse.ArgumentParser()
-argparser.add_argument("-c", "--cubes", required=True, type=str, help="Cube file")
-argparser.add_argument("-s", "--symmetry", required=True, type=str, help="Symmetry breaking clause file")
-
-args = argparser.parse_args()
-cube_file = args.cubes
-symmetry_breaking_file = args.symmetry
-
-encoding = modeler.Modeler()
-
-def itokenize(line):
-    if line[:-1] == '\n':
-        line = line[:-1]
-    tokens = line.split(' ')
-    ans = []
-    for t in tokens:
-        try:
-            t = int(t)
-            if t == 0: continue
-            ans.append(t)
-        except:
-            continue
-    return ans
-
-
-with open(cube_file, 'r') as cb_file:
-    for line in cb_file.readlines():
-        tkns = itokenize(line)
-        assert len(tkns) > 0
-        encoding.add_clause([-t for t in tkns])
-
-with open(symmetry_breaking_file, 'r') as sb_file:
-    for line in sb_file.readlines():
-        tkns = itokenize(line)
-        assert len(tkns) > 0
-        encoding.add_clause(tkns)
-
-encoding.serialize("cube_tautology.cnf")
diff --git a/utils/empty_triangle_encoder.py b/utils/empty_triangle_encoder.py
deleted file mode 100644
index 017d434..0000000
--- a/utils/empty_triangle_encoder.py
+++ /dev/null
@@ -1,43 +0,0 @@
-from eznf import modeler  # this is my library for encodings, it's similar to PySAT.
-import itertools
-
-N = 4  # number of points
-
-def encode(N):
-    triples = list(itertools.combinations(range(N), 3))  # all possible ordered triples of points
-    quadruples = list(itertools.combinations(range(N), 4))  # all possible ordered quadruples of points
-
-    Z = modeler.Modeler()  # create a modeler object, just the way of using my library
-
-    for triple in triples:
-        a, b, c = triple
-        Z.add_var(f's_{a, b, c}', description=f"signotope of points {a}, {b}, {c}")
-
-    # for quadruple in quadruples:
-    #     a, b, c, d = quadruple
-    #     Z.constraint(f'-s_{a, b, c} or -s_{a, c, d} or s_{a, b, d}')
-    #     Z.constraint(f'-s_{a, b, c} or -s_{b, c, d} or s_{a, c, d}')
-    #     Z.constraint(f's_{a, b, c} or s_{a, c, d} or -s_{a, b, d}')
-    #     Z.constraint(f's_{a, b, c} or s_{b, c, d} or -s_{a, c, d}')
-
-    for triple in triples:
-        a, b, c = triple
-        for x in range(a + 1, b):  # x < b
-            Z.add_var(f'inside_{x, a, b, c}', description=f"{x} is inside the triangle {a, b, c}")
-            Z.constraint(f"inside_{x, a, b, c} <-> ((s_{a, b, c} <-> s_{a, x, c}) & (-s_{a, x, b} <-> s_{a, b, c}))")
-        for x in range(b+1, c):  # x > b
-            Z.add_var(f'inside_{x, a, b, c}', description=f"{x} is inside the triangle {a, b, c}")
-            Z.constraint(f"inside_{x, a, b, c} <-> ((s_{a, b, c} <-> s_{a, x, c}) & (-s_{b, x, c} <-> s_{a, b, c}))")
-        # a,b,c is a hole iff it has no points inside.
-        Z.add_var(f'hole_{a, b, c}', description=f"{a, b, c} is a hole")
-        x_range = list(range(a+1, b)) + list(range(b+1, c))
-        for x in x_range:
-            Z.constraint(f"inside_{x, a, b, c} -> -hole_{a, b, c}")
-        Z.add_clause([f"hole_{triple}"] + [f"inside_{x, a, b, c}" for x in x_range])
-
-    for triple in triples:
-        Z.add_clause([f"-hole_{triple}"])
-    return Z
-
-encoding = encode(N)
-encoding.serialize(f'{N}-points-wo-3-hole.cnf')
diff --git a/utils/inefficient_encoder.py b/utils/inefficient_encoder.py
deleted file mode 100644
index fa11b7d..0000000
--- a/utils/inefficient_encoder.py
+++ /dev/null
@@ -1,91 +0,0 @@
-from eznf import modeler  # this is my library for encodings, it's similar to PySAT.
-import itertools
-
-N = 5  # number of points
-k = 4  # gon-arity (i.e., k = 4 means empty convex quadrilaterals).
-# this encoding is good enough for getting SAT with N=9, k=5 and UNSAT with N=10, k=5
-# (note that in this case the mere encoding takes a few seconds; this is due to inefficiencies in my parsing code)
-
-def encode(N, k):
-    triples = list(itertools.combinations(range(N), 3))  # all possible ordered triples of points
-    quadruples = list(itertools.combinations(range(N), 4))  # all possible ordered quadruples of points
-
-    Z = modeler.Modeler()  # create a modeler object, just the way of using my library
-
-    for triple in triples:
-        a, b, c = triple
-        Z.add_var(f's_{a, b, c}', description=f"signotope of points {a}, {b}, {c}")
-
-    # signotope axioms
-    for quadruple in quadruples:
-        a, b, c, d = quadruple
-        # 1
-        Z.constraint(f's_{a, b, c} or -s_{a, b, d} or s_{a, c, d}')
-        Z.constraint(f'-s_{a, b, c} or s_{a, b, d} or -s_{a, c, d}')
-        # 2
-        Z.constraint(f's_{a, b, c} or -s_{a, c, d} or s_{b, c, d}')
-        Z.constraint(f'-s_{a, b, c} or s_{a, c, d} or -s_{b, c, d}')
-        # 3
-        Z.constraint(f's_{a, b, c} or -s_{a, b, d} or s_{b, c, d}')
-        Z.constraint(f'-s_{a, b, c} or s_{a, b, d} or -s_{b, c, d}')
-        # 4
-        Z.constraint(f's_{a, b, d} or -s_{a, c, d} or s_{b, c, d}')
-        Z.constraint(f'-s_{a, b, d} or s_{a, c, d} or -s_{b, c, d}')
-
-    # inside-ness variables
-    for triple in triples:
-        a, b, c = triple
-
-        # we introduce a hole variable to denote this triple being a hole
-        Z.add_var(f"hole_{a, b, c}", f"{a, b, c} is an empty triangle")
-
-        # given points are x-sorted, for x to be inside the abc triangle, we need a < x < c.
-        # we also need to case on whether x < b or x > b
-        for x in range(a + 1, b):  # x < b
-            Z.add_var(f'inside_{x, a, b, c}', description=f"{x} is inside the triangle {a, b, c}")
-
-            # for x to be inside a, b, c, we need
-            # s_{a, b, c} to have the same sign as s_{a, x, c}
-            # and s_{a, x, b} to have the opposite sign to them.
-            Z.constraint(f"inside_{x, a, b, c} <-> ((s_{a, b, c} <-> s_{a, x, c}) & (-s_{a, x, b} <-> s_{a, b, c}))")
-            # this can be expressed with 6 clauses :) letting my library do the work for simplicity.
-
-        for x in range(b+1, c):  # x > b
-            Z.add_var(f'inside_{x, a, b, c}', description=f"{x} is inside the triangle {a, b, c}")
-
-            # for x to be inside a, b, c, we need
-            # s_{a, b, c} to have the same sign as s_{a, x, c}
-            # and s_{b, x, c} to have the opposite sign to them.
-            Z.constraint(f"inside_{x, a, b, c} <-> ((s_{a, b, c} <-> s_{a, x, c}) & (-s_{b, x, c} <-> s_{a, b, c}))")
-
-        # a,b,c is a hole iff it has no points inside.
-        for x in list(range(a+1, b)) + list(range(b+1, c)):
-            Z.constraint(f"inside_{x, a, b, c} -> -hole_{a, b, c}")
-        Z.add_clause([f"hole_{a, b, c}"] + [f"inside_{x,a,b,c}" for x in list(range(a+1, b)) + list(range(b+1, c))])
-
-    # k points {p_1, p_2, ..., p_k} determine a convex k-hole
-    # if and only if all triangles p_a, p_b, p_c are holes for 1 <= a < b < c <= k
-    #
-    # this is because "k points determine a convex k-hole <-> their convex hull has k points
-    #                  <-> all k points are in the convex hull
-    #                  <-> no point is inside a triangle from the set
-    #                  <-> all triangles are holes"
-
-    k_tuples = list(itertools.combinations(range(N), k))  # this is the only part that depends on k in this encoding.
-    for k_tuple in k_tuples:
-        Z.add_var(f"convex_k_hole_{k_tuple}")
-        Z.constraint(f"-convex_k_hole_{k_tuple}")
-        # as we want to FORBID k-holes,
-        # we need that for every k_tuple there is a non-empty triangle
-        triangles = list(itertools.combinations(list(k_tuple), 3))
-        Z.add_clause([f"convex_k_hole_{k_tuple}"] + [f"-hole_{a, b, c}" for a, b, c in triangles])
-        for a, b, c in triangles:
-            Z.add_clause([f"-convex_k_hole_{k_tuple}"] + [f"hole_{a, b, c}"])
-    return Z
-
-
-encoding = encode(N, k)
-encoding.serialize(f'{N}-points-wo-{k}-hole.cnf')
diff --git a/utils/symm_breaking_fig.py b/utils/symm_breaking_fig.py
deleted file mode 100644
index 8db8a03..0000000
--- a/utils/symm_breaking_fig.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import math
-
-
-L_1 = [(-1.3, 0.25), (-1.3, 0.9), (-0.7, -0.45), (-0.35, 0.35), (-0.35, -0.8), (0.6, 0.5), (0.9, -0.6), (1.4, 1.1), (0.6, -1.1)]
-
-def to_tikz(L):
-    for i in range(len(L)):
-        print(rf"\coordinate (c{i+1}) at {L[i]};")
-
-    for i in range(len(L)):
-        print(rf"\node[draw, circle, fill=blue, text=white, inner sep=0pt, minimum size=5pt] (p{i+1}) at {L[i]} {{\scriptsize ${i+1}$}};")
-
-
-def rotate_by(p, theta):
-    return (p[0]*math.cos(theta) - p[1]*math.sin(theta), p[0]*math.sin(theta) + p[1]*math.cos(theta))
-
-def step_1(L):
-    theta = 0.785398
-    def rot(p):
-        return rotate_by(p, theta)
-    return list(map(rot, L))
-
-def trans_by(p, t):
-    return (p[0]+t[0], p[1]+t[1])
-
-def step_2(L):
-    lmi = -1
-    lmx = 1e9
-    for i in range(len(L)):
-        if L[i][0] < lmx:
-            lmx = L[i][0]
-            lmi = i
-    t = (-L[lmi][0], -L[lmi][1])
-    return [trans_by(p, t) for p in L]
-
-
-def step_3(L):
-    ans = []
-    for p in L:
-        if p[0] != 0:
-            ans.append((p[1]/p[0], 1/p[0]))
-        else:
-            ans.append((-1.2, 3.85))
-    return ans
-
-def step_4(L):
-    return sorted(L, key=lambda p: p[0])
-
-to_tikz(L_1)
-
-no_same_x = step_1(L_1)
-print('\nno same x:')
-to_tikz(no_same_x)
-
-translated = step_2(no_same_x)
-print('\ntranslated:')
-to_tikz(translated)
-
-projected = step_3(translated)
-print('\nprojected:')
-to_tikz(projected)
-
-
-sorted_pts = step_4(projected)
-print('\nsorted:')
-to_tikz(sorted_pts)