Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Solution Tracking #592

Draft
wants to merge 6 commits into
base: master
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 16 additions & 8 deletions apps/arweave/include/ar_mining.hrl
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,8 @@
cache_ref = not_set, %% not serialized
chunk1 = not_set, %% not serialized
chunk2 = not_set, %% not serialized
cm_diff = not_set, %% serialized. set to the difficulty used by the H1 miner
diff_pair = not_set, %% serialized. set to the difficulty associated with this candidate.
%% diff_pair can be set by CM peers or a pool server.
cm_h1_list = [], %% serialized. list of {h1, nonce} pairs
cm_lead_peer = not_set, %% not serialized. if set, this candidate came from another peer
h0 = not_set, %% serialized
Expand All @@ -27,31 +28,38 @@
preimage = not_set, %% serialized. this can be either the h1 or h2 preimage
seed = not_set, %% serialized
session_key = not_set, %% serialized
solution_peer = not_set, %% serialized. if set, the winning hash came from another peer
start_interval_number = not_set, %% serialized
step_number = not_set, %% serialized
label = <<"not_set">> %% not atom, for prevent atom table pollution DoS
label = <<"not_set">> %% not atom, in order to prevent atom table pollution DoS
}).

-record(mining_solution, {
last_step_checkpoints = [],
merkle_rebase_threshold = 0,
last_step_checkpoints = [],
mining_address = << 0:256 >>,
next_seed = << 0:(8 * 48) >>,
next_vdf_difficulty = 0,
nonce = 0,
nonce_limiter_output = << 0:256 >>,
partition_number = 0,
partition_upper_bound = 0,
poa1 = #poa{},
poa2 = #poa{},
preimage = << 0:256 >>,
recall_byte1 = 0,
recall_byte2 = undefined,
seed = << 0:(8 * 48) >>,
solution_hash = << 0:256 >>,
solution_peer = not_set, %% serialized. if set, the solution hash came from another peer
start_interval_number = 0,
step_number = 0,
steps = [],
seed = << 0:(8 * 48) >>,
mining_address = << 0:256 >>,
partition_upper_bound = 0
steps = []
}).

%% @doc Solution validation response.
-record(solution_response, {
indep_hash = <<>>,
status = <<>>
}).

-endif.
6 changes: 0 additions & 6 deletions apps/arweave/include/ar_pool.hrl
Original file line number Diff line number Diff line change
Expand Up @@ -34,12 +34,6 @@
partition_upper_bound = 0
}).

%% @doc Partial solution validation response.
-record(partial_solution_response, {
indep_hash = <<>>,
status = <<>>
}).

%% @doc A set of coordinated mining jobs provided by the pool.
%%
%% Miners fetch and submit pool CM jobs via the same POST /pool_cm_jobs endpoint.
Expand Down
5 changes: 4 additions & 1 deletion apps/arweave/src/ar_chain_stats.erl
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
-behaviour(gen_server).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_config.hrl").
-include_lib("arweave/include/ar_chain_stats.hrl").
-include_lib("eunit/include/eunit.hrl").

Expand Down Expand Up @@ -38,7 +39,9 @@ get_forks(StartTime) ->
init([]) ->
	%% Trap exit to avoid corrupting any open files on quit.
process_flag(trap_exit, true),
ok = ar_kv:open(filename:join(?ROCKS_DB_DIR, "forks_db"), forks_db),
{ok, Config} = application:get_env(arweave, config),
RocksDBDir = filename:join(Config#config.data_dir, ?ROCKS_DB_DIR),
ok = ar_kv:open(filename:join(RocksDBDir, "forks_db"), forks_db),
{ok, #{}}.

handle_call({get_forks, StartTime}, _From, State) ->
Expand Down
32 changes: 17 additions & 15 deletions apps/arweave/src/ar_coordination.erl
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ computed_h1(Candidate, DiffPair) ->
ShareableCandidate = Candidate#mining_candidate{
chunk1 = not_set,
chunk2 = not_set,
cm_diff = DiffPair,
diff_pair = DiffPair,
cm_lead_peer = not_set,
h1 = not_set,
h2 = not_set,
Expand Down Expand Up @@ -243,8 +243,20 @@ handle_cast({compute_h2_for_peer, Candidate}, State) ->
{noreply, State};

handle_cast({computed_h2_for_peer, Candidate}, State) ->
#mining_candidate{ cm_lead_peer = Peer } = Candidate,
send_h2(Peer, Candidate),
#mining_candidate{
h0 = H0, nonce = Nonce, partition_number = Partition1,
partition_upper_bound = PartitionUpperBound, cm_lead_peer = Peer
} = Candidate,

{_RecallByte1, RecallByte2} = ar_mining_server:get_recall_bytes(H0, Partition1,
Nonce, PartitionUpperBound),
PoA = ar_mining_server:load_poa(RecallByte2, Candidate),
case PoA of
not_found ->
ar_mining_router:reject_solution(Candidate, failed_to_read_chunk_proofs, []);
_ ->
send_h2(Peer, Candidate#mining_candidate{ poa2 = PoA })
end,
{noreply, State};

handle_cast(refetch_peer_partitions, State) ->
Expand Down Expand Up @@ -385,24 +397,14 @@ send_h1(Candidate, State) ->
spawn(fun() ->
ar_http_iface_client:cm_h1_send(Peer, Candidate2)
end),
case Peer of
{pool, _} ->
ar_mining_stats:h1_sent_to_peer(pool, length(H1List));
_ ->
ar_mining_stats:h1_sent_to_peer(Peer, length(H1List))
end
ar_mining_stats:h1_sent_to_peer(Peer, length(H1List))
end.

send_h2(Peer, Candidate) ->
spawn(fun() ->
ar_http_iface_client:cm_h2_send(Peer, Candidate)
end),
case Peer of
{pool, _} ->
ar_mining_stats:h2_sent_to_peer(pool);
_ ->
ar_mining_stats:h2_sent_to_peer(Peer)
end.
ar_mining_stats:h2_sent_to_peer(Peer).

add_mining_peer({Peer, StorageModules}, State) ->
Partitions = lists:map(
Expand Down
2 changes: 0 additions & 2 deletions apps/arweave/src/ar_events_sup.erl
Original file line number Diff line number Diff line change
Expand Up @@ -55,8 +55,6 @@ init([]) ->
?CHILD(ar_events, chunk_storage, worker),
%% Events: add_range, remove_range, global_remove_range, cut, global_cut.
?CHILD(ar_events, sync_record, worker),
%% Events: rejected, stale, partial, accepted.
?CHILD(ar_events, solution, worker),
%% Used for the testing purposes.
?CHILD(ar_events, testing, worker)
]}}.
23 changes: 8 additions & 15 deletions apps/arweave/src/ar_http_iface_client.erl
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
get_block_time_history/3,
push_nonce_limiter_update/3, get_vdf_update/1, get_vdf_session/1,
get_previous_vdf_session/1, get_cm_partition_table/1, cm_h1_send/2, cm_h2_send/2,
cm_publish_send/2, get_jobs/2, post_partial_solution/2,
get_jobs/2, post_partial_solution/2,
get_pool_cm_jobs/2, post_pool_cm_jobs/2, post_cm_partition_table_to_pool/2]).

-include_lib("arweave/include/ar.hrl").
Expand Down Expand Up @@ -591,17 +591,10 @@ cm_h1_send(Peer, Candidate) ->
cm_h2_send(Peer, Candidate) ->
JSON = ar_serialize:jsonify(ar_serialize:candidate_to_json_struct(Candidate)),
Req = build_cm_or_pool_request(post, Peer, "/coordinated_mining/h2", JSON),
handle_cm_noop_response(ar_http:req(Req)).

cm_publish_send(Peer, Solution) ->
?LOG_DEBUG([{event, cm_publish_send}, {peer, ar_util:format_peer(Peer)},
{solution, ar_util:encode(Solution#mining_solution.solution_hash)},
{step_number, Solution#mining_solution.step_number},
{start_interval_number, Solution#mining_solution.start_interval_number},
{seed, ar_util:encode(Solution#mining_solution.seed)}]),
JSON = ar_serialize:jsonify(ar_serialize:solution_to_json_struct(Solution)),
Req = build_cm_or_pool_request(post, Peer, "/coordinated_mining/publish", JSON),
handle_cm_noop_response(ar_http:req(Req)).
handle_solution_response(ar_http:req(Req#{
timeout => 20 * 1000,
connect_timeout => 5 * 1000
})).

%% @doc Fetch the jobs from the pool or coordinated mining exit peer.
get_jobs(Peer, PrevOutput) ->
Expand All @@ -619,7 +612,7 @@ post_partial_solution(Peer, Solution) ->
ar_serialize:jsonify(ar_serialize:solution_to_json_struct(Solution))
end,
Req = build_cm_or_pool_request(post, Peer, "/partial_solution", Payload),
handle_post_partial_solution_response(ar_http:req(Req#{
handle_solution_response(ar_http:req(Req#{
timeout => 20 * 1000,
connect_timeout => 5 * 1000
})).
Expand Down Expand Up @@ -704,14 +697,14 @@ handle_post_pool_cm_jobs_response({ok, {{<<"200">>, _}, _, _, _, _}}) ->
handle_post_pool_cm_jobs_response(Reply) ->
{error, Reply}.

handle_post_partial_solution_response({ok, {{<<"200">>, _}, _, Body, _, _}}) ->
handle_solution_response({ok, {{<<"200">>, _}, _, Body, _, _}}) ->
case catch jiffy:decode(Body, [return_maps]) of
{'EXIT', _} ->
{error, invalid_json};
Response ->
{ok, Response}
end;
handle_post_partial_solution_response(Reply) ->
handle_solution_response(Reply) ->
{error, Reply}.

handle_get_jobs_response({ok, {{<<"200">>, _}, _, Body, _, _}}) ->
Expand Down
113 changes: 113 additions & 0 deletions apps/arweave/src/ar_http_iface_cm_pool.erl
Original file line number Diff line number Diff line change
@@ -0,0 +1,113 @@
-module(ar_http_iface_cm_pool).

-export([handle_post_solution/2, handle_get_jobs/3]).

-include_lib("arweave/include/ar_config.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Entry point for POST /solution. Rejects the request when the node has
%% not joined the network yet; otherwise picks the secret-validation mode
%% (pool server -> pool, CM exit peer -> cm, neither -> error, which
%% validate_request/2 turns into a 501) and, on a passing secret check,
%% delegates to handle_post_solution2/2.
handle_post_solution(Req, Pid) ->
	case ar_node:is_joined() of
		false ->
			not_joined(Req);
		true ->
			%% Both probes are evaluated up front, in this order, exactly as
			%% the mode decision requires; pool server takes precedence.
			IsPoolServer = ar_pool:is_server(),
			IsExitPeer = ar_coordination:is_exit_peer(),
			Mode =
				case {IsPoolServer, IsExitPeer} of
					{true, _} -> pool;
					{_, true} -> cm;
					{false, false} -> error
				end,
			case validate_request(Mode, Req) of
				true ->
					handle_post_solution2(Req, Pid);
				Rejection ->
					%% Either a {Status, Headers, Body, Req} reject tuple or
					%% the 501 configuration response.
					Rejection
			end
	end.

%% @doc Entry point for GET /jobs. Rejects the request when the node has not
%% joined the network yet; otherwise base64url-decodes the previous VDF
%% output from the URL and delegates to handle_get_jobs2/2, replying 400 on
%% an undecodable value.
%%
%% Fix: the clause previously ended with `end;` — the `;` announces another
%% clause of handle_get_jobs/3 that does not exist, which is a syntax error.
%% The final clause of a function must end with `.`. Also silenced the
%% unused-variable warning by renaming the unused Pid parameter to _Pid
%% (arity and call sites are unaffected).
handle_get_jobs(EncodedPrevOutput, Req, _Pid) ->
	case ar_node:is_joined() of
		false ->
			not_joined(Req);
		true ->
			case ar_util:safe_decode(EncodedPrevOutput) of
				{ok, PrevOutput} ->
					handle_get_jobs2(PrevOutput, Req);
				{error, invalid} ->
					{400, #{}, jiffy:encode(#{ error => invalid_prev_output }), Req}
			end
	end.


%%%===================================================================
%%% Internal functions.
%%%===================================================================

%% @doc Read and decode a JSON-encoded mining solution from the request body
%% and hand it to the mining router. Replies 200 with the router's response,
%% 400 on undecodable input, 413 when the body exceeds the size limit, or
%% 500 on a body-read timeout.
%% NOTE(review): `case catch Expr of {'EXIT', _}` maps errors to a 400, but a
%% term thrown (not errored) inside decode/convert would match the Solution
%% branch — presumably these functions only error, never throw; confirm.
handle_post_solution2(Req, Pid) ->
	%% Captured before reading the body so the origin is known for logging.
	Peer = ar_http_util:arweave_peer(Req),
	case read_complete_body(Req, Pid) of
		{ok, Body, Req2} ->
			case catch ar_serialize:json_map_to_solution(
					jiffy:decode(Body, [return_maps])) of
				{'EXIT', _} ->
					%% Body was not valid JSON or not a valid solution map.
					{400, #{}, jiffy:encode(#{ error => invalid_json }), Req2};
				Solution ->
					%% Record the submitting peer, then route the solution;
					%% the router's response is serialized back to the caller.
					ar_mining_router:received_solution(Solution,
							[{peer, ar_util:format_peer(Peer)}]),
					Response = ar_mining_router:route_solution(Solution),
					JSON = ar_serialize:solution_response_to_json_struct(Response),
					{200, #{}, ar_serialize:jsonify(JSON), Req2}
			end;
		{error, body_size_too_large} ->
			{413, #{}, <<"Payload too large">>, Req};
		{error, timeout} ->
			{500, #{}, <<"Handler timeout">>, Req}
	end.

%% @doc Serve the jobs for the given previous VDF output. A pool server
%% generates fresh jobs behind the internal API secret; a CM exit node that
%% is also a pool client serves its cached jobs behind the CM API secret
%% (pool server takes precedence when both apply); any other configuration
%% gets a 501.
handle_get_jobs2(PrevOutput, Req) ->
	{ok, Config} = application:get_env(arweave, config),
	IsCMExitNode = ar_coordination:is_exit_peer() andalso ar_pool:is_client(),
	case {Config#config.is_pool_server, IsCMExitNode} of
		{false, false} ->
			{501, #{}, jiffy:encode(#{ error => configuration }), Req};
		{true, _} ->
			serve_jobs(check_internal_api_secret(Req),
					fun() -> ar_pool:generate_jobs(PrevOutput) end, Req);
		{_, true} ->
			serve_jobs(check_cm_api_secret(Req),
					fun() -> ar_pool:get_cached_jobs(PrevOutput) end, Req)
	end.

%% Shared tail of both authorized branches: forward a rejected secret check
%% verbatim, or fetch the jobs (lazily, only after the check passes),
%% serialize them, and reply 200.
serve_jobs({reject, {Status, Headers, Body}}, _GetJobs, Req) ->
	{Status, Headers, Body, Req};
serve_jobs(pass, GetJobs, Req) ->
	JSON = ar_serialize:jsonify(ar_serialize:jobs_to_json_struct(GetJobs())),
	{200, #{}, JSON, Req}.

%% @doc Run the secret check matching the given mode. Returns `true` when
%% the request is authorized, a {Status, Headers, Body, Req} tuple when the
%% secret check rejects it, and a 501 configuration response for any mode
%% other than `pool` or `cm`.
validate_request(pool, Req) ->
	secret_check_result(check_internal_api_secret(Req), Req);
validate_request(cm, Req) ->
	secret_check_result(check_cm_api_secret(Req), Req);
validate_request(_Mode, Req) ->
	{501, #{}, jiffy:encode(#{ error => configuration }), Req}.

%% Translate a secret-check outcome into the validation result; any other
%% term the check might produce is passed through unchanged.
secret_check_result(pass, _Req) ->
	true;
secret_check_result({reject, {Status, Headers, Body}}, Req) ->
	{Status, Headers, Body, Req};
secret_check_result(Other, _Req) ->
	Other.
Loading
Loading