Indentation second pass
lightvector committed May 22, 2023
1 parent 3e7d4a8 commit d02f23e
Showing 19 changed files with 1,628 additions and 1,629 deletions.
16 changes: 8 additions & 8 deletions python/board.py
@@ -133,10 +133,10 @@ def is_simple_eye(self,pla,loc):
             return True

         against_wall = (
-          self.board[adj0] == Board.WALL or \
-          self.board[adj1] == Board.WALL or \
-          self.board[adj2] == Board.WALL or \
-          self.board[adj3] == Board.WALL
+            self.board[adj0] == Board.WALL or \
+            self.board[adj1] == Board.WALL or \
+            self.board[adj2] == Board.WALL or \
+            self.board[adj3] == Board.WALL
         )

         if against_wall:
@@ -612,10 +612,10 @@ def countImmediateLiberties(self,loc):

     def is_group_adjacent(self,head,loc):
         return (
-          self.group_head[loc+self.adj[0]] == head or \
-          self.group_head[loc+self.adj[1]] == head or \
-          self.group_head[loc+self.adj[2]] == head or \
-          self.group_head[loc+self.adj[3]] == head
+            self.group_head[loc+self.adj[0]] == head or \
+            self.group_head[loc+self.adj[1]] == head or \
+            self.group_head[loc+self.adj[2]] == head or \
+            self.group_head[loc+self.adj[3]] == head
         )

     #Helper, merge two groups assuming they're owned by the same player and adjacent
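Aside from indentation, the wall checks above are unchanged: a point is against the wall if any of its four neighbors on the padded 1-D board is a WALL cell. A minimal self-contained sketch of that pattern (the constants, helper names, and offsets here are illustrative assumptions, not KataGo's actual encoding):

    EMPTY, WALL = 0, 3  # illustrative constants, not KataGo's actual values

    def make_padded_board(size):
        # 1-D board with a one-cell WALL border; loc = (y+1)*(size+2) + (x+1).
        arr = [EMPTY] * ((size + 2) * (size + 2))
        for i in range(size + 2):
            arr[i] = WALL                            # top edge
            arr[(size + 1) * (size + 2) + i] = WALL  # bottom edge
            arr[i * (size + 2)] = WALL               # left edge
            arr[i * (size + 2) + (size + 1)] = WALL  # right edge
        return arr

    def against_wall(board, loc, stride):
        # Same shape as the expression in the hunk: true if any neighbor is WALL.
        return any(board[loc + d] == WALL for d in (-stride, -1, 1, stride))

    size = 9
    board = make_padded_board(size)
    corner = 1 * (size + 2) + 1  # the (0,0) point
    print(against_wall(board, corner, stride=size + 2))  # True: a corner touches the edge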
24 changes: 12 additions & 12 deletions python/data.py
@@ -96,21 +96,21 @@ def load_sgf_moves_exn(path):
     if rulesstr is not None:
         if rulesstr.lower() == "japanese" or rulesstr.lower() == "jp":
             rules = {
-              "koRule": "KO_SIMPLE",
-              "scoringRule": "SCORING_TERRITORY",
-              "multiStoneSuicideLegal": False,
-              "encorePhase": 0,
-              "passWouldEndPhase": False,
-              "whiteKomi": komi
+                "koRule": "KO_SIMPLE",
+                "scoringRule": "SCORING_TERRITORY",
+                "multiStoneSuicideLegal": False,
+                "encorePhase": 0,
+                "passWouldEndPhase": False,
+                "whiteKomi": komi
             }
         elif rulesstr.lower() == "chinese":
             rules = {
-              "koRule": "KO_SIMPLE",
-              "scoringRule": "SCORING_AREA",
-              "multiStoneSuicideLegal": False,
-              "encorePhase": 0,
-              "passWouldEndPhase": False,
-              "whiteKomi": komi
+                "koRule": "KO_SIMPLE",
+                "scoringRule": "SCORING_AREA",
+                "multiStoneSuicideLegal": False,
+                "encorePhase": 0,
+                "passWouldEndPhase": False,
+                "whiteKomi": komi
             }
         elif rulesstr.startswith("ko"):
             rules = {}
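Both hunks above only reindent the literal rules dictionaries that load_sgf_moves_exn builds from an SGF's rules string. A hedged sketch of the same dispatch, factored into a standalone function for illustration (the wrapper name and the empty-dict fallthrough are assumptions; the keys and values are taken from the diff):

    def rules_from_sgf_string(rulesstr, komi):
        # Hypothetical wrapper around the dispatch shown in the hunks above.
        base = {
            "koRule": "KO_SIMPLE",
            "multiStoneSuicideLegal": False,
            "encorePhase": 0,
            "passWouldEndPhase": False,
            "whiteKomi": komi,
        }
        if rulesstr.lower() in ("japanese", "jp"):
            return dict(base, scoringRule="SCORING_TERRITORY")
        elif rulesstr.lower() == "chinese":
            return dict(base, scoringRule="SCORING_AREA")
        return {}

    print(rules_from_sgf_string("Japanese", komi=6.5)["scoringRule"])  # SCORING_TERRITORY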
12 changes: 6 additions & 6 deletions python/edit_checkpoint.py
@@ -25,9 +25,9 @@
 if output_json_to is not None:
     assert output_json_to.endswith(".json")
     data_to_write = dict(
-      running_metrics = data["running_metrics"],
-      train_state = data["train_state"],
-      config = data["config"] if "config" in data else None,
+        running_metrics = data["running_metrics"],
+        train_state = data["train_state"],
+        config = data["config"] if "config" in data else None,
     )
     with open(output_json_to,"w") as f:
         json.dump(data,f,indent=2)
@@ -51,8 +51,8 @@

 else:
     data_to_write = dict(
-      running_metrics = data["running_metrics"],
-      train_state = data["train_state"],
-      config = data["config"] if "config" in data else None,
+        running_metrics = data["running_metrics"],
+        train_state = data["train_state"],
+        config = data["config"] if "config" in data else None,
     )
     print(json.dumps(data_to_write,indent=2))
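For context, a minimal usage sketch of the pattern these hunks touch, assuming the checkpoint is a PyTorch file with the keys shown in the diff (the path is hypothetical, and data.get("config") is just the idiomatic spelling of the conditional above):

    import json
    import torch  # assumed dependency; the script edits PyTorch checkpoints

    data = torch.load("model_checkpoint.ckpt", map_location="cpu")  # hypothetical path
    data_to_write = dict(
        running_metrics = data["running_metrics"],
        train_state = data["train_state"],
        config = data.get("config"),  # None when absent, like the conditional in the diff
    )
    print(json.dumps(data_to_write, indent=2))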
86 changes: 43 additions & 43 deletions python/elo.py
@@ -186,36 +186,36 @@ def likelihood_of_games(
     if not include_first_player_advantage:
         if p1_won_proportion > 0.0:
             ret.append(Likelihood(
-              playercombo={p1: 1.0, p2: -1.0},
-              offset=0.0,
-              weight=p1_won_proportion*num_games,
-              gamecount=p1_won_proportion*num_games,
-              kind=Likelihood.SIGMOID_KIND
+                playercombo={p1: 1.0, p2: -1.0},
+                offset=0.0,
+                weight=p1_won_proportion*num_games,
+                gamecount=p1_won_proportion*num_games,
+                kind=Likelihood.SIGMOID_KIND
             ))
         if p1_won_proportion < 1.0:
             ret.append(Likelihood(
-              playercombo={p2: 1.0, p1: -1.0},
-              offset=0.0,
-              weight=(1.0-p1_won_proportion)*num_games,
-              gamecount=(1.0-p1_won_proportion)*num_games,
-              kind=Likelihood.SIGMOID_KIND
+                playercombo={p2: 1.0, p1: -1.0},
+                offset=0.0,
+                weight=(1.0-p1_won_proportion)*num_games,
+                gamecount=(1.0-p1_won_proportion)*num_games,
+                kind=Likelihood.SIGMOID_KIND
             ))
     else:
         if p1_won_proportion > 0.0:
             ret.append(Likelihood(
-              playercombo={p1: 1.0, p2: -1.0, P1_ADVANTAGE_NAME: 1.0},
-              offset=0.0,
-              weight=p1_won_proportion*num_games,
-              gamecount=p1_won_proportion*num_games,
-              kind=Likelihood.SIGMOID_KIND
+                playercombo={p1: 1.0, p2: -1.0, P1_ADVANTAGE_NAME: 1.0},
+                offset=0.0,
+                weight=p1_won_proportion*num_games,
+                gamecount=p1_won_proportion*num_games,
+                kind=Likelihood.SIGMOID_KIND
             ))
         if p1_won_proportion < 1.0:
             ret.append(Likelihood(
-              playercombo={p2: 1.0, p1: -1.0, P1_ADVANTAGE_NAME: -1.0},
-              offset=0.0,
-              weight=(1.0-p1_won_proportion)*num_games,
-              gamecount=(1.0-p1_won_proportion)*num_games,
-              kind=Likelihood.SIGMOID_KIND
+                playercombo={p2: 1.0, p1: -1.0, P1_ADVANTAGE_NAME: -1.0},
+                offset=0.0,
+                weight=(1.0-p1_won_proportion)*num_games,
+                gamecount=(1.0-p1_won_proportion)*num_games,
+                kind=Likelihood.SIGMOID_KIND
             ))

     return ret
@@ -238,18 +238,18 @@ def make_single_player_prior(
     assert np.isfinite(elo)
     if num_games > 0.0:
         ret.append(Likelihood(
-          playercombo={p1: 1.0},
-          offset=(-elo / ELO_PER_STRENGTH),
-          weight=0.5*num_games,
-          gamecount=0.5*num_games,
-          kind=Likelihood.SIGMOID_KIND
+            playercombo={p1: 1.0},
+            offset=(-elo / ELO_PER_STRENGTH),
+            weight=0.5*num_games,
+            gamecount=0.5*num_games,
+            kind=Likelihood.SIGMOID_KIND
         ))
         ret.append(Likelihood(
-          playercombo={p1: -1.0},
-          offset=(elo / ELO_PER_STRENGTH),
-          weight=0.5*num_games,
-          gamecount=0.5*num_games,
-          kind=Likelihood.SIGMOID_KIND
+            playercombo={p1: -1.0},
+            offset=(elo / ELO_PER_STRENGTH),
+            weight=0.5*num_games,
+            gamecount=0.5*num_games,
+            kind=Likelihood.SIGMOID_KIND
         ))
     return ret

@@ -278,11 +278,11 @@ def make_sequential_prior(

     for i in range(len(players)-1):
         ret.extend(likelihood_of_games(
-          p1=players[i],
-          p2=players[i+1],
-          num_games=num_games,
-          p1_won_proportion=0.5,
-          include_first_player_advantage=False,
+            p1=players[i],
+            p2=players[i+1],
+            num_games=num_games,
+            p1_won_proportion=0.5,
+            include_first_player_advantage=False,
         ))
     return ret

@@ -304,11 +304,11 @@ def make_center_elos_prior(
     assert len(set(players)) == len(players), "players must not contain any duplicates"
     playercombo = { player: 1.0 for player in players }
     ret.append(Likelihood(
-      playercombo=playercombo,
-      offset=-len(players) * elo / ELO_PER_STRENGTH,
-      weight=0.001,
-      gamecount=0.0,
-      kind=Likelihood.GAUSSIAN_KIND
+        playercombo=playercombo,
+        offset=-len(players) * elo / ELO_PER_STRENGTH,
+        weight=0.001,
+        gamecount=0.0,
+        kind=Likelihood.GAUSSIAN_KIND
     ))
     return ret

@@ -446,9 +446,9 @@ def line_search_ascend(strengths: np.array, cur_loglikelihood: float) -> Tuple[n
         elo_stderr = { player: math.sqrt(1.0 / elo_precision[player_to_idx[player],player_to_idx[player]]) for player in players },
         elo_covariance = { (p1,p2): elo_covariance[player_to_idx[p1],player_to_idx[p2]] for p1 in players for p2 in players },
         effective_game_count = {
-          player: (np.square(sqrt_ess_numerator[player_to_idx[player],player_to_idx[player]]) /
-                   ess_denominator[player_to_idx[player],player_to_idx[player]])
-          for player in players
+            player: (np.square(sqrt_ess_numerator[player_to_idx[player],player_to_idx[player]]) /
+                     ess_denominator[player_to_idx[player],player_to_idx[player]])
+            for player in players
         },
     )
     return info
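The Likelihood records in these hunks feed elo.py's maximum-likelihood fit. Assuming SIGMOID_KIND corresponds to the standard logistic (Bradley-Terry) Elo model (an assumption about elo.py's internals, not something visible in this diff), the win probability implied by an Elo difference can be sketched as:

    import math

    ELO_PER_STRENGTH = 400.0 / math.log(10.0)  # assumed scaling: 400 Elo per factor of 10 in odds

    def p_win(elo_a, elo_b):
        # Logistic win probability for player A under the standard Elo model.
        d = (elo_a - elo_b) / ELO_PER_STRENGTH
        return 1.0 / (1.0 + math.exp(-d))

    print(round(p_win(1400.0, 1000.0), 3))  # 0.909: a 400-Elo edge wins at roughly 10:1 odds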
20 changes: 10 additions & 10 deletions python/export_model_pytorch.py
@@ -48,12 +48,12 @@ def main(args):

     logging.root.handlers = []
     logging.basicConfig(
-      level=logging.INFO,
-      format="%(message)s",
-      handlers=[
-        logging.StreamHandler(stream=sys.stdout),
-        logging.FileHandler(export_dir + "/log.txt"),
-      ],
+        level=logging.INFO,
+        format="%(message)s",
+        handlers=[
+            logging.StreamHandler(stream=sys.stdout),
+            logging.FileHandler(export_dir + "/log.txt"),
+        ],
     )
     np.set_printoptions(linewidth=150)

@@ -369,13 +369,13 @@ def write_model(model):
if "running_metrics" in other_state_dict:
assert sorted(list(other_state_dict["running_metrics"].keys())) == ["sums", "weights"]
data["extra_stats"] = {
"sums": { key: value for (key,value) in other_state_dict["running_metrics"]["sums"].items() if "sopt" not in key and "lopt" not in key },
"weights": { key: value for (key,value) in other_state_dict["running_metrics"]["weights"].items() if "sopt" not in key and "lopt" not in key },
"sums": { key: value for (key,value) in other_state_dict["running_metrics"]["sums"].items() if "sopt" not in key and "lopt" not in key },
"weights": { key: value for (key,value) in other_state_dict["running_metrics"]["weights"].items() if "sopt" not in key and "lopt" not in key },
}
if "last_val_metrics" in other_state_dict and "sums" in other_state_dict["last_val_metrics"] and "weights" in other_state_dict["last_val_metrics"]:
data["extra_stats"]["last_val_metrics"] = {
"sums": { key: value for (key,value) in other_state_dict["last_val_metrics"]["sums"].items() if "sopt" not in key and "lopt" not in key },
"weights": { key: value for (key,value) in other_state_dict["last_val_metrics"]["weights"].items() if "sopt" not in key and "lopt" not in key },
"sums": { key: value for (key,value) in other_state_dict["last_val_metrics"]["sums"].items() if "sopt" not in key and "lopt" not in key },
"weights": { key: value for (key,value) in other_state_dict["last_val_metrics"]["weights"].items() if "sopt" not in key and "lopt" not in key },
}
json.dump(data,f)

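The repeated comprehension in this hunk strips entries whose keys mention "sopt" or "lopt" before the stats are exported. A small standalone sketch of that filter (the function name and sample keys are illustrative):

    def drop_sopt_lopt(metrics):
        # Keep only entries whose keys mention neither "sopt" nor "lopt".
        return { key: value for (key, value) in metrics.items()
                 if "sopt" not in key and "lopt" not in key }

    print(drop_sopt_lopt({"p0loss": 0.71, "norm_normal_sopt": 13.9, "wsum": 256.0}))
    # {'p0loss': 0.71, 'wsum': 256.0}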
8 changes: 4 additions & 4 deletions python/features.py
@@ -221,10 +221,10 @@ def addPrevPrevLadderFeature(loc,pos,workingMoves):

     if hasAreaFeature:
         board.calculateNonDameTouchingArea(
-          area,
-          keepTerritories,
-          keepStones,
-          rules["multiStoneSuicideLegal"]
+            area,
+            keepTerritories,
+            keepStones,
+            rules["multiStoneSuicideLegal"]
         )

     for y in range(bsize):
10 changes: 5 additions & 5 deletions python/forward_model.py
@@ -53,11 +53,11 @@ def main(args):

     logging.root.handlers = []
     logging.basicConfig(
-      level=logging.INFO,
-      format="%(message)s",
-      handlers=[
-        logging.StreamHandler(stream=sys.stdout)
-      ],
+        level=logging.INFO,
+        format="%(message)s",
+        handlers=[
+            logging.StreamHandler(stream=sys.stdout)
+        ],
     )
     np.set_printoptions(linewidth=150)

16 changes: 8 additions & 8 deletions python/genboard_train.py
@@ -146,8 +146,8 @@ def __iter__(self):
                 num_always_known_poses = 0
             else:
                 num_always_known_poses = (
-                  ( min(alwaysknownxmax, metadata.size-1) - max(alwaysknownxmin, 0) + 1) *
-                  ( min(alwaysknownymax, metadata.size-1) - max(alwaysknownymin, 0) + 1)
+                    ( min(alwaysknownxmax, metadata.size-1) - max(alwaysknownxmin, 0) + 1) *
+                    ( min(alwaysknownymax, metadata.size-1) - max(alwaysknownymin, 0) + 1)
                 )
             num_not_always_known_poses = metadata.size * metadata.size - num_always_known_poses
             inferenceidx = rand.randint(0,num_not_always_known_poses-1)
@@ -452,12 +452,12 @@ def lossfunc(inputs, results, preds, aux, auxpreds):
         running_ewms_exgnorm += max(0.0, gnorm - grad_clip_max)
         if running_batch_count >= print_every_batches:
             trainlog("TRAIN samples: %d, batches: %d, main loss: %.5f, aux loss: %.5f, gnorm: %.2f, ewms_exgnorm: %.3g" % (
-              traindata["samples_so_far"],
-              traindata["batches_so_far"],
-              running_main_loss / (running_batch_count * batch_size),
-              running_aux_loss / (running_batch_count * batch_size),
-              running_gnorm / (running_batch_count),
-              running_ewms_exgnorm / (running_batch_count),
+                traindata["samples_so_far"],
+                traindata["batches_so_far"],
+                running_main_loss / (running_batch_count * batch_size),
+                running_aux_loss / (running_batch_count * batch_size),
+                running_gnorm / (running_batch_count),
+                running_ewms_exgnorm / (running_batch_count),
             ))
             running_batch_count = 0
             running_main_loss = 0.0
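The first genboard_train.py hunk computes num_always_known_poses as the area of an "always known" rectangle clamped to the board. A standalone sketch of that clamped-area computation (the function name and sample numbers are illustrative; the min/max clamping mirrors the diff):

    def clamped_rect_area(xmin, xmax, ymin, ymax, size):
        # Intersect [xmin,xmax] x [ymin,ymax] with the size x size board, then take the area.
        width  = min(xmax, size - 1) - max(xmin, 0) + 1
        height = min(ymax, size - 1) - max(ymin, 0) + 1
        return max(0, width) * max(0, height)

    print(clamped_rect_area(-2, 4, 3, 30, size=19))  # 5 * 16 = 80 points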
1 change: 0 additions & 1 deletion python/migrate_double_v1.py
@@ -64,7 +64,6 @@ def expand_out_dim_for(name, scale):


if any("intermediate_value_head" in key for key in data["model"].keys()):

expand_out_dim_for("intermediate_value_head.conv1.weight", scale=1.0)
expand_in_dim_for("intermediate_value_head.bias1.beta", scale=1.0)
expand_in_dim_for("intermediate_value_head.linear2.weight", scale=math.sqrt(0.5))
