diff --git a/gerrychain/metrics/compactness.py b/gerrychain/metrics/compactness.py
index 8ac63329..63c872df 100644
--- a/gerrychain/metrics/compactness.py
+++ b/gerrychain/metrics/compactness.py
@@ -1,15 +1,36 @@
 import math
+from typing import Dict
 
 
-def compute_polsby_popper(area, perimeter):
+def compute_polsby_popper(area: float, perimeter: float) -> float:
+    """
+    Computes the Polsby-Popper score for a single district.
+
+    :param area: The area of the district
+    :type area: float
+    :param perimeter: The perimeter of the district
+    :type perimeter: float
+
+    :returns: The Polsby-Popper score for the district
+    :rtype: float
+    """
     try:
         return 4 * math.pi * area / perimeter ** 2
     except ZeroDivisionError:
         return math.nan
 
 
-def polsby_popper(partition):
-    """Computes Polsby-Popper compactness scores for each district in the partition.
+# Partition type hint left out due to circular import
+# def polsby_popper(partition: Partition) -> Dict[int, float]:
+def polsby_popper(partition) -> Dict[int, float]:
+    """
+    Computes Polsby-Popper compactness scores for each district in the partition.
+
+    :param partition: The partition to compute scores for
+    :type partition: Partition
+
+    :returns: A dictionary mapping each district ID to its Polsby-Popper score
+    :rtype: Dict[int, float]
     """
     return {
         part: compute_polsby_popper(
diff --git a/gerrychain/metrics/partisan.py b/gerrychain/metrics/partisan.py
index ac802336..d1c32725 100644
--- a/gerrychain/metrics/partisan.py
+++ b/gerrychain/metrics/partisan.py
@@ -1,11 +1,26 @@
+"""
+The partisan metrics in this file are later used in the
+gerrychain.updaters.election module. Thus, all of the election
+results objects here are implicitly typed as ElectionResults,
+but cannot be given an explicit type annotation due to problems
+with circular imports.
+"""
+
 import numpy
+from typing import Tuple
 
 
-def mean_median(election_results):
+def mean_median(election_results) -> float:
     """
     Computes the Mean-Median score for the given ElectionResults.
     A positive value indicates an advantage for the first party listed
     in the Election's parties_to_columns dictionary.
+
+    :param election_results: An ElectionResults object
+    :type election_results: ElectionResults
+
+    :returns: The Mean-Median score for the given ElectionResults
+    :rtype: float
     """
     first_party = election_results.election.parties[0]
     data = election_results.percents(first_party)
@@ -13,7 +28,7 @@ def mean_median(election_results):
     return numpy.median(data) - numpy.mean(data)
 
 
-def mean_thirdian(election_results):
+def mean_thirdian(election_results) -> float:
     """
     Computes the Mean-Median score for the given ElectionResults.
     A positive value indicates an advantage for the first party listed
@@ -21,6 +36,12 @@ def mean_thirdian(election_results):
 
     The motivation for this score is that the minority party in many
     states struggles to win even a third of the seats.
+
+    :param election_results: An ElectionResults object
+    :type election_results: ElectionResults
+
+    :returns: The Mean-Thirdian score for the given ElectionResults
+    :rtype: float
     """
     first_party = election_results.election.parties[0]
     data = election_results.percents(first_party)
@@ -31,24 +52,35 @@ def mean_thirdian(election_results):
     return thirdian - numpy.mean(data)
 
 
-def efficiency_gap(results):
+def efficiency_gap(election_results) -> float:
     """
     Computes the efficiency gap for the given ElectionResults.
     A positive value indicates an advantage for the first party listed
     in the Election's parties_to_columns dictionary.
+
+    :param election_results: An ElectionResults object
+    :type election_results: ElectionResults
+
+    :returns: The efficiency gap for the given ElectionResults
+    :rtype: float
     """
-    party1, party2 = [results.counts(party) for party in results.election.parties]
+    party1, party2 = [election_results.counts(party) for party in election_results.election.parties]
     wasted_votes_by_part = map(wasted_votes, party1, party2)
-    total_votes = results.total_votes()
+    total_votes = election_results.total_votes()
     numerator = sum(waste2 - waste1 for waste1, waste2 in wasted_votes_by_part)
     return numerator / total_votes
 
 
-def wasted_votes(party1_votes, party2_votes):
+def wasted_votes(party1_votes: int, party2_votes: int) -> Tuple[int, int]:
     """
     Computes the wasted votes for each party in the given race.
 
-    :party1_votes: the number of votes party1 received in the race
-    :party2_votes: the number of votes party2 received in the race
+    :param party1_votes: the number of votes party1 received in the race
+    :type party1_votes: int
+    :param party2_votes: the number of votes party2 received in the race
+    :type party2_votes: int
+
+    :returns: a tuple of the wasted votes for each party
+    :rtype: Tuple[int, int]
     """
     total_votes = party1_votes + party2_votes
     if party1_votes > party2_votes:
@@ -60,12 +92,18 @@ def wasted_votes(party1_votes, party2_votes):
     return party1_waste, party2_waste
 
 
-def partisan_bias(election_results):
+def partisan_bias(election_results) -> float:
     """
     Computes the partisan bias for the given ElectionResults.
     The partisan bias is defined as the number of districts with above-mean
     vote share by the first party divided by the total number of districts,
     minus 1/2.
+
+    :param election_results: An ElectionResults object
+    :type election_results: ElectionResults
+
+    :returns: The partisan bias for the given ElectionResults
+    :rtype: float
     """
     first_party = election_results.election.parties[0]
     party_shares = numpy.array(election_results.percents(first_party))
@@ -74,26 +112,24 @@ def partisan_bias(election_results):
     return (above_mean_districts / len(party_shares)) - 0.5
 
 
-def partisan_gini(election_results):
+def partisan_gini(election_results) -> float:
     """
     Computes the partisan Gini score for the given ElectionResults.
     The partisan Gini score is defined as the area between the seats-votes
     curve and its reflection about (.5, .5).
+
+    For more information on the computation, see Definition 1 in:
+    https://arxiv.org/pdf/2008.06930.pdf
+
+    :param election_results: An ElectionResults object
+    :type election_results: ElectionResults
+
+    :returns: The partisan Gini score for the given ElectionResults
+    :rtype: float
     """
     # For two parties, the Gini score is symmetric--it does not vary by party.
     party = election_results.election.parties[0]
 
-    # To find seats as a function of votes, we assume uniform partisan swing.
-    # That is, if the statewide popular vote share for a party swings by some
-    # delta, the vote share for that party swings by that delta in each
-    # district.
-    # We calculate the necessary delta to shift the district with the highest
-    # vote share for the party to a vote share of 0.5. This delta, subtracted
-    # from the original popular vote share, gives the minimum popular vote
-    # share that yields 1 seat to the party.
-    # We repeat this process for the district with the second-highest vote
-    # share, which gives the minimum popular vote share yielding 2 seats,
-    # and so on.
     overall_result = election_results.percent(party)
     race_results = sorted(election_results.percents(party), reverse=True)
     seats_votes = [overall_result - r + 0.5 for r in race_results]
@@ -101,10 +137,9 @@ def partisan_gini(election_results):
     # Apply reflection of seats-votes curve about (.5, .5)
     reflected_sv = reversed([1 - s for s in seats_votes])
     # Calculate the unscaled, unsigned area between the seats-votes curve
-    # and its reflection. For each possible number of seats attained, we find
-    # the area of a rectangle of unit height, with a width determined by the
-    # horizontal distance between the curves at that number of seats.
+    # and its reflection.
     unscaled_area = sum(abs(s - r) for s, r in zip(seats_votes, reflected_sv))
+
     # We divide the area by the number of seats to obtain a partisan Gini score
     # between 0 and 1.
     return unscaled_area / len(race_results)
diff --git a/gerrychain/partition/assignment.py b/gerrychain/partition/assignment.py
index d6b47d77..4d5bbd51 100644
--- a/gerrychain/partition/assignment.py
+++ b/gerrychain/partition/assignment.py
@@ -1,12 +1,14 @@
 from collections import defaultdict
 from collections.abc import Mapping
-from typing import Dict
+from typing import Dict, Union, Optional, DefaultDict, Set
+from ..graph import Graph
 
 import pandas
 
 
 class Assignment(Mapping):
-    """An assignment of nodes into parts.
+    """
+    An assignment of nodes into parts.
 
     The goal of :class:`Assignment` is to provide an interface that mirrors a
     dictionary (what we have been using for assigning nodes to districts) while making it
@@ -20,7 +22,21 @@ class Assignment(Mapping):
         'mapping'
     ]
 
-    def __init__(self, parts, mapping=None, validate=True):
+    def __init__(self, parts: Dict, mapping: Dict = None, validate: bool = True) -> None:
+        """
+        :param parts: dictionary mapping partition assignments to sets or
+            frozensets of nodes
+        :type parts: Dict
+        :param mapping: dictionary mapping nodes to partition assignments
+        :type mapping: Dict, optional
+        :param validate: whether to validate the assignment
+        :type validate: bool, optional
+
+        :returns: None
+
+        :raises ValueError: if the keys of ``parts`` are not unique
+        :raises TypeError: if the values of ``parts`` are not frozensets
+        """
         if validate:
             number_of_keys = sum(len(keys) for keys in parts.values())
             number_of_unique_keys = len(set().union(*parts.values()))
@@ -51,13 +67,15 @@ def __getitem__(self, node):
         return self.mapping[node]
 
     def copy(self):
-        """Returns a copy of the assignment.
+        """
+        Returns a copy of the assignment.
 
         Does not duplicate the frozensets of nodes, just the parts dictionary.
         """
         return Assignment(self.parts.copy(), self.mapping.copy(), validate=False)
 
     def update_flows(self, flows):
-        """Update the assignment for some nodes using the given flows.
+        """
+        Update the assignment for some nodes using the given flows.
         """
         for part, flow in flows.items():
             # Union between frozenset and set returns an object whose type
@@ -68,7 +86,8 @@ def update_flows(self, flows):
                 self.mapping[node] = part
 
     def items(self):
-        """Iterate over ``(node, part)`` tuples, where ``node`` is assigned to ``part``.
+        """
+        Iterate over ``(node, part)`` tuples, where ``node`` is assigned to ``part``.
         """
         yield from self.mapping.items()
 
@@ -78,12 +97,16 @@ def keys(self):
     def values(self):
         yield from self.mapping.values()
 
-    def update_parts(self, new_parts):
-        """Update some parts of the assignment. Does not check that every node is
+    def update_parts(self, new_parts: Dict) -> None:
+        """
+        Update some parts of the assignment. Does not check that every node is
         still assigned to a part.
 
-        :param dict new_parts: dictionary mapping (some) parts to their new sets or
+        :param new_parts: dictionary mapping (some) parts to their new sets or
             frozensets of nodes
+        :type new_parts: Dict
+
+        :returns: None
         """
         for part, nodes in new_parts.items():
             self.parts[part] = frozenset(nodes)
@@ -91,49 +114,105 @@ def update_parts(self, new_parts):
             for node in nodes:
                 self.mapping[node] = part
 
-    def to_series(self):
-        """Convert the assignment to a :class:`pandas.Series`."""
+    def to_series(self) -> pandas.Series:
+        """
+        :returns: The assignment as a :class:`pandas.Series`.
+        :rtype: pandas.Series
+        """
         groups = [
             pandas.Series(data=part, index=nodes) for part, nodes in self.parts.items()
         ]
         return pandas.concat(groups)
 
-    def to_dict(self):
-        """Convert the assignment to a ``{node: part}`` dictionary."""
+    def to_dict(self) -> Dict:
+        """
+        :returns: The assignment as a ``{node: part}`` dictionary.
+        :rtype: Dict
+        """
         return self.mapping
 
     @classmethod
-    def from_dict(cls, assignment):
-        """Create an :class:`Assignment` from a dictionary. This is probably the method you want
+    def from_dict(cls, assignment: Dict) -> "Assignment":
+        """
+        Create an :class:`Assignment` from a dictionary. This is probably the method you want
        to use to create a new assignment.
 
        This also works for :class:`pandas.Series`.
+
+        :param assignment: dictionary mapping nodes to partition assignments
+        :type assignment: Dict
+
+        :returns: A new instance of :class:`Assignment` with the same assignments as the
+            passed-in dictionary.
+        :rtype: Assignment
         """
         parts = {part: frozenset(keys) for part, keys in level_sets(assignment).items()}
         return cls(parts)
 
 
-def get_assignment(assignment, graph=None):
-    if isinstance(assignment, str):
+def get_assignment(part_assignment: Union[str, Dict, Assignment],
+                   graph: Optional[Graph] = None
+                   ) -> Assignment:
+    """
+    Either extracts an :class:`Assignment` object from the input graph
+    using the provided key or attempts to convert part_assignment into
+    an :class:`Assignment` object.
+
+    :param part_assignment: A node attribute key, dictionary, or
+        :class:`Assignment` object corresponding to the desired assignment.
+    :type part_assignment: Union[str, Dict, Assignment]
+    :param graph: The graph from which to extract the assignment.
+    :type graph: Optional[Graph], optional
+
+    :returns: An :class:`Assignment` object containing the assignment
+        corresponding to the part_assignment input
+    :rtype: Assignment
+
+    :raises TypeError: If the part_assignment is a string and the graph
+        is not provided.
+    :raises TypeError: If the part_assignment is not a string, dictionary, or Assignment.
+ """ + if isinstance(part_assignment, str): if graph is None: raise TypeError( - "You must provide a graph when using a node attribute for the assignment" + "You must provide a graph when using a node attribute for the part_assignment" ) return Assignment.from_dict( - {node: graph.nodes[node][assignment] for node in graph} + {node: graph.nodes[node][part_assignment] for node in graph} ) - elif callable(getattr(assignment, "items", None)): - return Assignment.from_dict(assignment) - elif isinstance(assignment, Assignment): - return assignment + # Check if assignment is a dict or a mapping type + elif callable(getattr(part_assignment, "items", None)): + return Assignment.from_dict(part_assignment) + elif isinstance(part_assignment, Assignment): + return part_assignment else: raise TypeError("Assignment must be a dict or a node attribute key") -def level_sets(mapping: dict, container=set): - """Inverts a dictionary. ``{key: value}`` becomes - ``{value: }``.""" +def level_sets(mapping: Dict, container: Set = set) -> DefaultDict: + """ + Inverts a dictionary. ``{key: value}`` becomes + ``{value: }``. + + :param mapping: A dictionary to invert. Keys and values can be of any type. + :type mapping: Dict + :param container: A container type used to collect keys that map to the same value. + By default, it is a set. + :type container: Set + + :return: A dictionary where each key is a value from the original dictionary, + and the corresponding value is a container (by default, a set) of keys from + the original dictionary that mapped to this value. + :rtype: DefaultDict + + Example usage:: + + .. code_block:: python + + >>> level_sets({'a': 1, 'b': 1, 'c': 2}) + defaultdict(, {1: {'a', 'b'}, 2: {'c'}}) + """ sets: Dict = defaultdict(container) for source, target in mapping.items(): sets[target].add(source) diff --git a/gerrychain/partition/partition.py b/gerrychain/partition/partition.py index f4ae0703..47f5d083 100644 --- a/gerrychain/partition/partition.py +++ b/gerrychain/partition/partition.py @@ -111,7 +111,7 @@ def flip(self, flips): on this partition. :param flips: dictionary assigning nodes of the graph to their new districts - :return: the new :class:`Partition` + :returns: the new :class:`Partition` :rtype: Partition """ return self.__class__(parent=self, flips=flips) diff --git a/gerrychain/partition/subgraphs.py b/gerrychain/partition/subgraphs.py index e29f4089..080c2dc4 100644 --- a/gerrychain/partition/subgraphs.py +++ b/gerrychain/partition/subgraphs.py @@ -1,24 +1,63 @@ +from typing import List, Any, Tuple +from ..graph import Graph + + class SubgraphView: + """ + A view for accessing subgraphs of :class:`Graph` objects. + + This class makes use of a subgraph cache to avoid recomputing subgraphs + which can speed up computations when working with district assignments + within a partition class. + + :ivar graph: The parent graph from which subgraphs are derived. + :type graph: Graph + :ivar parts: A dictionary mapping keys to subsets of nodes in the graph. + :type parts: List[List[Any]] + :ivar subgraphs_cache: Cache to store subgraph views for quick access. + :type subgraphs_cache: Dict + """ __slots__ = [ "graph", "parts", "subgraphs_cache" ] - def __init__(self, graph, parts): + def __init__(self, graph: Graph, parts: List[List[Any]]) -> None: + """ + :param graph: The parent graph from which subgraphs are derived. 
+        :type graph: Graph
+        :param parts: A list of lists of nodes corresponding to the different
+            parts of the partition of the graph
+        :type parts: List[List[Any]]
+
+        :returns: None
+        """
         self.graph = graph
         self.parts = parts
         self.subgraphs_cache = {}
 
-    def __getitem__(self, part):
+    def __getitem__(self, part: int) -> Graph:
+        """
+        :param part: The id of the part to return the subgraph for.
+        :type part: int
+
+        :returns: The subgraph of the parent graph corresponding to the
+            part with id ``part``.
+        :rtype: Graph
+        """
         if part not in self.subgraphs_cache:
             self.subgraphs_cache[part] = self.graph.subgraph(self.parts[part])
         return self.subgraphs_cache[part]
 
-    def __iter__(self):
+    def __iter__(self) -> Iterator[Graph]:
         for part in self.parts:
             yield self[part]
 
-    def items(self):
+    def items(self) -> Iterator[Tuple[int, Graph]]:
         for part in self.parts:
             yield part, self[part]
+
+    def __repr__(self) -> str:
+        return f"<SubgraphView with {len(self.parts)} parts>"
diff --git a/gerrychain/proposals/proposals.py b/gerrychain/proposals/proposals.py
index 975c95af..b098c8a3 100644
--- a/gerrychain/proposals/proposals.py
+++ b/gerrychain/proposals/proposals.py
@@ -1,8 +1,18 @@
 import random
+# from typing import TypeVar
+# Partition = TypeVar("Partition")
+from ..partition import Partition
 
 
-def propose_any_node_flip(partition):
-    """Flip a random node (not necessarily on the boundary) to a random part
+def propose_any_node_flip(partition: Partition) -> Partition:
+    """
+    Flip a random node (not necessarily on the boundary) to a random part
+
+    :param partition: The current partition to propose a flip from.
+    :type partition: Partition
+
+    :returns: A possible next `~gerrychain.Partition`
+    :rtype: Partition
     """
 
     node = random.choice(tuple(partition.graph))
@@ -11,11 +21,15 @@ def propose_any_node_flip(partition):
     return partition.flip({node: newpart})
 
 
-def propose_flip_every_district(partition):
-    """Proposes a random boundary flip for each district in the partition.
+def propose_flip_every_district(partition: Partition) -> Partition:
+    """
+    Proposes a random boundary flip for each district in the partition.
 
-    :param partition: The current partition to propose a flip from.
-    :return: a proposed next `~gerrychain.Partition`
+    :param partition: The current partition to propose the flips from.
+    :type partition: Partition
+
+    :returns: A possible next `~gerrychain.Partition`
+    :rtype: Partition
     """
     flips = dict()
 
@@ -31,11 +45,15 @@ def propose_flip_every_district(partition):
     return partition.flip(flips)
 
 
-def propose_chunk_flip(partition):
-    """Chooses a random boundary node and proposes to flip it and all of its neighbors
+def propose_chunk_flip(partition: Partition) -> Partition:
+    """
+    Chooses a random boundary node and proposes to flip it and all of its neighbors
 
     :param partition: The current partition to propose a flip from.
-    :return: a proposed next `~gerrychain.Partition`
+    :type partition: Partition
+
+    :returns: A possible next `~gerrychain.Partition`
+    :rtype: Partition
     """
     flips = dict()
 
@@ -56,11 +74,15 @@ def propose_chunk_flip(partition):
     return partition.flip(flips)
 
 
-def propose_random_flip(partition):
-    """Proposes a random boundary flip from the partition.
+def propose_random_flip(partition: Partition) -> Partition:
+    """
+    Proposes a random boundary flip from the partition.
 
     :param partition: The current partition to propose a flip from.
-    :return: a proposed next `~gerrychain.Partition`
+    :type partition: Partition
+
+    :returns: A possible next `~gerrychain.Partition`
+    :rtype: Partition
     """
     if len(partition["cut_edges"]) == 0:
         return partition
@@ -71,8 +93,9 @@ def propose_random_flip(partition):
     return partition.flip(flip)
 
 
-def slow_reversible_propose_bi(partition):
-    """Proposes a random boundary flip from the partition in a reversible fasion
+def slow_reversible_propose_bi(partition: Partition) -> Partition:
+    """
+    Proposes a random boundary flip from the partition in a reversible fashion
     for bipartitions by selecting a boundary node at random and uniformly
     picking one of its neighboring parts. For k-partitions this is not
     uniform since there might be multiple parts next to a single node.
 
     Temporary version until we make an updater for this set.
 
     :param partition: The current partition to propose a flip from.
-    :return: a proposed next `~gerrychain.Partition`
+    :type partition: Partition
+
+    :returns: A possible next `~gerrychain.Partition`
+    :rtype: Partition
     """
     b_nodes = {x[0] for x in partition["cut_edges"]}.union({x[1] for x in partition["cut_edges"]})
@@ -97,14 +123,18 @@ def slow_reversible_propose_bi(partition):
 
 flip = propose_random_flip
 
 
-def slow_reversible_propose(partition):
-    """Proposes a random boundary flip from the partition in a reversible fasion
+def slow_reversible_propose(partition: Partition) -> Partition:
+    """
+    Proposes a random boundary flip from the partition in a reversible fashion
     by selecting uniformly from the (node, flip) pairs.
 
     Temporary version until we make an updater for this set.
 
     :param partition: The current partition to propose a flip from.
-    :return: a proposed next `~gerrychain.Partition`
+    :type partition: Partition
+
+    :returns: A possible next `~gerrychain.Partition`
+    :rtype: Partition
     """
     b_nodes = {(x[0], partition.assignment.mapping[x[1]]) for x in partition["cut_edges"]
diff --git a/gerrychain/proposals/spectral_proposals.py b/gerrychain/proposals/spectral_proposals.py
index 77664dcf..4d846cec 100644
--- a/gerrychain/proposals/spectral_proposals.py
+++ b/gerrychain/proposals/spectral_proposals.py
@@ -1,14 +1,34 @@
 import networkx as nx
 from numpy import linalg as LA
 import random
+from ..graph import Graph
+from ..partition import Partition
+from typing import Dict, Optional
 
 
-def spectral_cut(graph, part_labels, weight_type, lap_type):
-    """Spectral cut function.
+def spectral_cut(
+    graph: Graph,
+    part_labels: Dict,
+    weight_type: str,
+    lap_type: str
+) -> Dict:
+    """
+    Spectral cut function.
 
     Uses the signs of the elements in the Fiedler vector of a graph
     to partition into two components.
 
+    :param graph: The graph to be partitioned.
+    :type graph: Graph
+    :param part_labels: The current partition of the graph.
+    :type part_labels: Dict
+    :param weight_type: The type of weight to be used in the Laplacian.
+    :type weight_type: str
+    :param lap_type: The type of Laplacian to be used.
+    :type lap_type: str
+
+    :returns: A dictionary assigning nodes of the graph to their new districts.
+    :rtype: Dict
     """
     nlist = list(graph.nodes())
@@ -35,7 +55,11 @@ def spectral_cut(graph, part_labels, weight_type, lap_type):
     return clusters
 
 
-def spectral_recom(partition, weight_type=None, lap_type="normalized"):
+def spectral_recom(
+    partition: Partition,
+    weight_type: Optional[str] = None,
+    lap_type: str = "normalized"
+) -> Partition:
     """Spectral ReCom proposal.
 
     Uses spectral clustering to bipartition a subgraph of the original graph
@@ -56,6 +80,15 @@ def spectral_recom(partition, weight_type=None, lap_type="normalized"):
 
         chain = MarkovChain(proposal, constraints, accept, partition, total_steps)
 
+    :param partition: The initial partition.
+    :type partition: Partition
+    :param weight_type: The type of weight to be used in the Laplacian. Default is None.
+    :type weight_type: Optional[str], optional
+    :param lap_type: The type of Laplacian to be used. Default is "normalized".
+    :type lap_type: str, optional
+
+    :returns: The new partition resulting from the spectral ReCom algorithm.
+    :rtype: Partition
     """
 
     edge = random.choice(tuple(partition["cut_edges"]))
diff --git a/gerrychain/proposals/tree_proposals.py b/gerrychain/proposals/tree_proposals.py
index 4bb781f8..c4557aec 100644
--- a/gerrychain/proposals/tree_proposals.py
+++ b/gerrychain/proposals/tree_proposals.py
@@ -21,6 +21,14 @@ def recom(
     method: Callable = bipartition_tree
 ) -> Partition:
     """
+    ReCom (short for ReCombination) is a Markov Chain Monte Carlo (MCMC) algorithm
+    used for redistricting. At each step of the algorithm, a pair of adjacent districts
+    is selected at random and merged into a single district. The region is then split
+    into two new districts by generating a spanning tree using the Kruskal/Karger
+    algorithm and cutting an edge at random. The edge is checked to ensure that it
+    separates the region into two new districts that are population balanced, and,
+    if not, a new edge is selected at random and the process is repeated.
+
     Example usage:
 
     .. code-block:: python
@@ -40,6 +48,25 @@ def recom(
 
         chain = MarkovChain(proposal, constraints, accept, partition, total_steps)
 
+    :param partition: The initial partition.
+    :type partition: Partition
+    :param pop_col: The name of the population column.
+    :type pop_col: str
+    :param pop_target: The target population for each district.
+    :type pop_target: float
+    :param epsilon: The epsilon value for population deviation.
+    :type epsilon: float
+    :param node_repeats: The number of times to repeat the bipartitioning step. Default is 1.
+    :type node_repeats: int, optional
+    :param weight_dict: The weight dictionary for the graph used for region-aware
+        partitioning. Default is None.
+    :type weight_dict: Optional[Dict], optional
+    :param method: The method used for bipartitioning the tree. Default is
+        :func:`~gerrychain.tree.bipartition_tree`.
+    :type method: Callable, optional
+
+    :returns: The new partition resulting from the ReCom algorithm.
+    :rtype: Partition
     """
     edge = random.choice(tuple(partition["cut_edges"]))
@@ -52,7 +79,6 @@ def recom(
 
     # Try to add the region aware in if the method accepts the weight dictionary
     if 'weight_dict' in signature(method).parameters:
         method = partial(method, weight_dict=weight_dict)
-
     flips = recursive_tree_part(
         subgraph.graph,
@@ -67,10 +93,47 @@ def recom(
     return partition.flip(flips)
 
 
-def reversible_recom(partition, pop_col, pop_target, epsilon,
-                     balance_edge_fn=find_balanced_edge_cuts_memoization, M=1,
-                     repeat_until_valid=False, choice=random.choice):
-    """Reversible ReCom proposal."""
+def reversible_recom(
+    partition: Partition,
+    pop_col: str,
+    pop_target: float,
+    epsilon: float,
+    balance_edge_fn: Callable = find_balanced_edge_cuts_memoization,
+    M: int = 1,
+    repeat_until_valid: bool = False,
+    choice: Callable = random.choice
+) -> Partition:
+    """
+    Reversible ReCom algorithm for redistricting.
+
+    This function performs the reversible ReCom algorithm, which is a Markov Chain Monte
+    Carlo (MCMC) algorithm used for redistricting. For more information, see the paper
+    "Spanning Tree Methods for Sampling Graph Partitions" by Cannon, et al. (2022) at
+    https://arxiv.org/abs/2210.01401
+
+    :param partition: The initial partition.
+    :type partition: Partition
+    :param pop_col: The name of the population column.
+    :type pop_col: str
+    :param pop_target: The target population for each district.
+    :type pop_target: float
+    :param epsilon: The epsilon value for population deviation.
+    :type epsilon: float
+    :param balance_edge_fn: The balance edge function. Default is
+        find_balanced_edge_cuts_memoization.
+    :type balance_edge_fn: Callable, optional
+    :param M: The maximum number of balance edges. Default is 1.
+    :type M: int, optional
+    :param repeat_until_valid: Flag indicating whether to repeat until a valid partition is
+        found. Default is False.
+    :type repeat_until_valid: bool, optional
+    :param choice: The choice function for selecting a random element. Default is random.choice.
+    :type choice: Callable, optional
+
+    :returns: The new partition resulting from the reversible ReCom algorithm.
+    :rtype: Partition
+    """
+
     def dist_pair_edges(part, a, b):
         return set(
             e for e in part.graph.edges
@@ -139,13 +202,35 @@ def bounded_balance_edge_fn(*args, **kwargs):
 
 
 class ReCom:
-    def __init__(self, pop_col, ideal_pop, epsilon, method=bipartition_tree_random):
+    """
+    ReCom (short for ReCombination) is a class that represents a ReCom proposal
+    for redistricting. It is used to create new partitions by recombining existing
+    districts while maintaining population balance.
+
+    """
+
+    def __init__(self,
+                 pop_col: str,
+                 ideal_pop: float,
+                 epsilon: float,
+                 method: Callable = bipartition_tree_random):
+        """
+        :param pop_col: The name of the column in the partition that contains the population data.
+        :type pop_col: str
+        :param ideal_pop: The ideal population for each district.
+        :type ideal_pop: float
+        :param epsilon: The maximum allowable deviation from the ideal population.
+        :type epsilon: float
+        :param method: The method used for bipartitioning the tree.
+            Defaults to `bipartition_tree_random`.
+        :type method: Callable, optional
+        """
         self.pop_col = pop_col
         self.ideal_pop = ideal_pop
         self.epsilon = epsilon
         self.method = method
 
-    def __call__(self, partition):
+    def __call__(self, partition: Partition) -> Partition:
         return recom(
             partition, self.pop_col, self.ideal_pop, self.epsilon, method=self.method
         )
@@ -153,5 +238,6 @@ def __call__(self, partition):
 
 class ReversibilityError(Exception):
     """Raised when the cut edge upper bound is violated."""
+
     def __init__(self, msg):
         self.message = msg
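
Not part of the patch: a minimal usage sketch of a few helpers whose signatures are documented above, useful as a quick sanity check of the new annotations. It assumes a standard GerryChain install with the import paths shown in this diff; the sample numbers are made up for illustration.

    # Illustrative only -- not from the diff; sample values are arbitrary.
    import math

    from gerrychain.metrics.compactness import compute_polsby_popper
    from gerrychain.metrics.partisan import wasted_votes
    from gerrychain.partition.assignment import level_sets

    # A circle is maximally compact: 4 * pi * A / P**2 == 1.0.
    assert math.isclose(compute_polsby_popper(math.pi, 2 * math.pi), 1.0)

    # A zero perimeter yields NaN rather than raising ZeroDivisionError.
    assert math.isnan(compute_polsby_popper(1.0, 0.0))

    # wasted_votes returns a (party1_waste, party2_waste) tuple for one race:
    # the winner wastes votes above half the total, the loser wastes all of theirs.
    print(wasted_votes(70, 30))  # (20.0, 30)

    # level_sets inverts a {node: part} mapping into {part: {nodes}}.
    print(level_sets({"a": 1, "b": 1, "c": 2}))
    # defaultdict(<class 'set'>, {1: {'a', 'b'}, 2: {'c'}})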