[BIT 584] [feature] btcli register output stats not in place #923

Merged
Changes from 47 commits
Commits (50)
b7371cf
add flags for output_in_place during registration
camfairchild Sep 21, 2022
2d075fb
stop tracking best
camfairchild Sep 21, 2022
76d3f93
refactor registration logging output
camfairchild Sep 21, 2022
dd7c4bd
fix reregister from type bool
camfairchild Sep 21, 2022
38e63a7
change in_place and use_cuda to strtobool
camfairchild Sep 21, 2022
67c398b
add param and defaults
camfairchild Sep 21, 2022
c2acb33
fix reference before assignment
camfairchild Sep 21, 2022
f80af3e
add new logger to cuda rege
camfairchild Sep 21, 2022
793c7f8
pass param to btcli register call
camfairchild Sep 21, 2022
5efc050
oops
camfairchild Sep 21, 2022
5bca823
fix init
camfairchild Sep 21, 2022
1298823
try slight timeout
camfairchild Sep 21, 2022
adfc57c
try fix
camfairchild Sep 21, 2022
56d1cdc
oop
camfairchild Sep 21, 2022
bb9a324
?
camfairchild Sep 21, 2022
0f0d1b3
fix use_cuda flag
camfairchild Sep 21, 2022
55eae1e
add test for new use_cuda flag setup
camfairchild Sep 21, 2022
852f390
use create pow to patch
camfairchild Sep 21, 2022
b4ca52b
all no prompt dev id
camfairchild Sep 21, 2022
c5d2c74
fix console.error
camfairchild Sep 21, 2022
bde8fc2
use lower for str comparison
camfairchild Sep 21, 2022
ad290d6
call self register instead
camfairchild Sep 21, 2022
66088a3
add test for wallet register call
camfairchild Sep 21, 2022
f27e95e
tests are for wallet reregister
camfairchild Sep 21, 2022
8aaf34f
fix typo
camfairchild Sep 21, 2022
2c382de
no self on top-level test
camfairchild Sep 21, 2022
0d55177
fix tests?
camfairchild Sep 21, 2022
5616b7f
use reregister
camfairchild Sep 21, 2022
a2c4458
typo in test
camfairchild Sep 21, 2022
da845d1
fix assert
camfairchild Sep 21, 2022
8c170ba
fix assert
camfairchild Sep 21, 2022
0403ff4
should be False
camfairchild Sep 21, 2022
9f6b5f3
Merge branch 'nobunaga' into BIT-584-btcli-register-output-stats-not-…
Sep 22, 2022
4b1987e
Merge branch 'nobunaga' into BIT-584-btcli-register-output-stats-not-…
Sep 25, 2022
ee109a5
Merge branch 'nobunaga' into BIT-584-btcli-register-output-stats-not-…
Oct 3, 2022
d8cf3fe
fix time output to use timedelta
camfairchild Oct 3, 2022
bf41c1e
add log verbose as option to reg output
camfairchild Oct 3, 2022
6a822e3
Merge branch 'nobunaga' into BIT-584-btcli-register-output-stats-not-…
camfairchild Oct 4, 2022
e98c16e
should be action
camfairchild Oct 4, 2022
432bd49
fix typo
camfairchild Oct 4, 2022
bb9b5ea
add missing function arg
camfairchild Oct 4, 2022
2715036
fix spacing
camfairchild Oct 4, 2022
dbfe5c8
fix flags
camfairchild Oct 4, 2022
2c58216
fix flags
camfairchild Oct 4, 2022
d742f04
fix test
camfairchild Oct 5, 2022
b359b98
should pass in args to config pre-parse
camfairchild Oct 5, 2022
0c3f0be
Merge branch 'nobunaga' into BIT-584-btcli-register-output-stats-not-…
isabella618033 Oct 11, 2022
696052f
Merge branch 'nobunaga' into BIT-584-btcli-register-output-stats-not-…
unconst Oct 11, 2022
374fbb5
use None instead of NA
camfairchild Oct 12, 2022
60e7336
Merge branch 'nobunaga' into BIT-584-btcli-register-output-stats-not-…
camfairchild Oct 12, 2022
58 changes: 32 additions & 26 deletions bittensor/_cli/__init__.py
@@ -832,32 +832,38 @@ def check_overview_config( config: 'bittensor.Config' ):
def _check_for_cuda_reg_config( config: 'bittensor.Config' ) -> None:
"""Checks, when CUDA is available, if the user would like to register with their CUDA device."""
if torch.cuda.is_available():
if config.subtensor.register.cuda.get('use_cuda') is None:
# Ask about cuda registration only if a CUDA device is available.
cuda = Confirm.ask("Detected CUDA device, use CUDA for registration?\n")
config.subtensor.register.cuda.use_cuda = cuda

# Only ask about which CUDA device if the user has more than one CUDA device.
if config.subtensor.register.cuda.use_cuda and config.subtensor.register.cuda.get('dev_id') is None and torch.cuda.device_count() > 0:
devices: List[str] = [str(x) for x in range(torch.cuda.device_count())]
device_names: List[str] = [torch.cuda.get_device_name(x) for x in range(torch.cuda.device_count())]
console.print("Available CUDA devices:")
choices_str: str = ""
for i, device in enumerate(devices):
choices_str += (" {}: {}\n".format(device, device_names[i]))
console.print(choices_str)
dev_id = IntListPrompt.ask("Which GPU(s) would you like to use? Please list one, or comma-separated", choices=devices, default='All')
if dev_id == 'All':
dev_id = list(range(torch.cuda.device_count()))
else:
try:
# replace the commas with spaces then split over whitespace.,
# then strip the whitespace and convert to ints.
dev_id = [int(dev_id.strip()) for dev_id in dev_id.replace(',', ' ').split()]
except ValueError:
console.error(":cross_mark:[red]Invalid GPU device[/red] [bold white]{}[/bold white]\nAvailable CUDA devices:{}".format(dev_id, choices_str))
sys.exit(1)
config.subtensor.register.cuda.dev_id = dev_id
if not config.no_prompt:
if config.subtensor.register.cuda.get('use_cuda', 'NA') == 'NA': # flag not set
# Ask about cuda registration only if a CUDA device is available.
cuda = Confirm.ask("Detected CUDA device, use CUDA for registration?\n")
config.subtensor.register.cuda.use_cuda = cuda


# Only ask about which CUDA device if the user has more than one CUDA device.
if config.subtensor.register.cuda.use_cuda and config.subtensor.register.cuda.get('dev_id') is None:
devices: List[str] = [str(x) for x in range(torch.cuda.device_count())]
device_names: List[str] = [torch.cuda.get_device_name(x) for x in range(torch.cuda.device_count())]
console.print("Available CUDA devices:")
choices_str: str = ""
for i, device in enumerate(devices):
choices_str += (" {}: {}\n".format(device, device_names[i]))
console.print(choices_str)
dev_id = IntListPrompt.ask("Which GPU(s) would you like to use? Please list one, or comma-separated", choices=devices, default='All')
if dev_id.lower() == 'all':
dev_id = list(range(torch.cuda.device_count()))
else:
try:
# replace the commas with spaces then split over whitespace.,
# then strip the whitespace and convert to ints.
dev_id = [int(dev_id.strip()) for dev_id in dev_id.replace(',', ' ').split()]
except ValueError:
console.log(":cross_mark:[red]Invalid GPU device[/red] [bold white]{}[/bold white]\nAvailable CUDA devices:{}".format(dev_id, choices_str))
sys.exit(1)
config.subtensor.register.cuda.dev_id = dev_id
else:
# flag was not set, use default value.
if config.subtensor.register.cuda.get('use_cuda') is None:
config.subtensor.register.cuda.use_cuda = bittensor.defaults.subtensor.register.cuda.use_cuda

def check_register_config( config: 'bittensor.Config' ):
if config.subtensor.get('network') == bittensor.defaults.subtensor.network and not config.no_prompt:
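The key behavioural change in this hunk is that the CUDA prompt only runs when --no_prompt is not set, and an unset use_cuda flag is told apart from an explicit False by reading it against a sentinel default. A minimal standalone sketch of that resolution pattern follows; the config dict, DEFAULT_USE_CUDA constant, and ask_user helper are hypothetical stand-ins for bittensor.Config, bittensor.defaults, and rich's Confirm.ask.

# Sketch only: telling "flag not set" apart from an explicit False.
# config, DEFAULT_USE_CUDA and ask_user are hypothetical stand-ins.
DEFAULT_USE_CUDA = False

def ask_user() -> bool:
    return input("Detected CUDA device, use CUDA for registration? [y/N] ").strip().lower() in ('y', 'yes')

def resolve_use_cuda(config: dict, no_prompt: bool) -> bool:
    sentinel = 'NA'
    value = config.get('use_cuda', sentinel)
    if value != sentinel:
        return value                    # user passed --cuda or --no_cuda explicitly
    if no_prompt:
        return DEFAULT_USE_CUDA         # non-interactive: fall back to the default
    return ask_user()                   # interactive: prompt, as Confirm.ask does above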
9 changes: 7 additions & 2 deletions bittensor/_cli/cli_impl.py
@@ -245,8 +245,13 @@ def register( self ):
TPB = self.config.subtensor.register.cuda.get('TPB', None),
update_interval = self.config.subtensor.register.get('update_interval', None),
num_processes = self.config.subtensor.register.get('num_processes', None),
cuda = self.config.subtensor.register.cuda.get('use_cuda', None),
dev_id = self.config.subtensor.register.cuda.get('dev_id', None)
cuda = self.config.subtensor.register.cuda.get('use_cuda', 'NA')
if self.config.subtensor.register.cuda.get('use_cuda', 'NA') != 'NA'
else bittensor.defaults.subtensor.register.cuda.use_cuda
,
dev_id = self.config.subtensor.register.cuda.get('dev_id', None),
output_in_place = self.config.subtensor.register.get('output_in_place', bittensor.defaults.subtensor.register.output_in_place),
log_verbose = self.config.subtensor.register.get('verbose', bittensor.defaults.subtensor.register.verbose),
)

def transfer( self ):
10 changes: 5 additions & 5 deletions bittensor/_config/__init__.py
@@ -68,16 +68,16 @@ def __new__( cls, parser: ArgumentParser = None, strict: bool = False, args: Opt
# this can fail if the --config has already been added.
pass

# Get args from argv if not passed in.
if args == None:
args = sys.argv[1:]

# 1.1 Optionally load defaults if the --config is set.
try:
config_file_path = str(os.getcwd()) + '/' + vars(parser.parse_known_args()[0])['config']
config_file_path = str(os.getcwd()) + '/' + vars(parser.parse_known_args(args)[0])['config']
except Exception as e:
config_file_path = None

# Get args from argv if not passed in.
if args == None:
args = sys.argv[1:]

# Parse args not strict
params = cls.__parse_args__(args=args, parser=parser, strict=False)

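The reordering above matters because parse_known_args was previously reading sys.argv even when an explicit args list was supplied, so the --config lookup could point at the wrong file. Resolving args first and passing it through fixes that. A minimal standalone sketch of the corrected ordering, using a hypothetical parser rather than the real bittensor one:

import argparse
import sys

parser = argparse.ArgumentParser()
parser.add_argument('--config', default='default.yml')

def load_config_path(args=None):
    # Resolve args first so an explicit list overrides sys.argv everywhere below.
    if args is None:
        args = sys.argv[1:]
    known, _ = parser.parse_known_args(args)   # pass args through, not bare parse_known_args()
    return known.config

print(load_config_path(['--config', 'my.yml']))    # -> my.yml, regardless of sys.argv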
22 changes: 16 additions & 6 deletions bittensor/_subtensor/__init__.py
@@ -23,6 +23,8 @@
from substrateinterface import SubstrateInterface
from torch.cuda import is_available as is_cuda_available

from bittensor.utils import strtobool_with_default

from . import subtensor_impl, subtensor_mock

logger = logger.opt(colors=True)
@@ -187,13 +189,17 @@ def add_args(cls, parser: argparse.ArgumentParser, prefix: str = None ):
help='''The subtensor endpoint flag. If set, overrides the --network flag.
''')
parser.add_argument('--' + prefix_str + 'subtensor._mock', action='store_true', help='To turn on subtensor mocking for testing purposes.', default=bittensor.defaults.subtensor._mock)

parser.add_argument('--' + prefix_str + 'subtensor.register.num_processes', '-n', dest='subtensor.register.num_processes', help="Number of processors to use for registration", type=int, default=bittensor.defaults.subtensor.register.num_processes)
# registration args. Used for register and re-register and anything that calls register.
parser.add_argument('--' + prefix_str + 'subtensor.register.num_processes', '-n', dest=prefix_str + 'subtensor.register.num_processes', help="Number of processors to use for registration", type=int, default=bittensor.defaults.subtensor.register.num_processes)
parser.add_argument('--' + prefix_str + 'subtensor.register.update_interval', '--' + prefix_str + 'subtensor.register.cuda.update_interval', '--' + prefix_str + 'cuda.update_interval', '-u', help="The number of nonces to process before checking for next block during registration", type=int, default=bittensor.defaults.subtensor.register.update_interval)
# registration args. Used for register and re-register and anything that calls register.
parser.add_argument( '--' + prefix_str + 'subtensor.register.cuda.use_cuda', '--' + prefix_str + 'cuda', '--' + prefix_str + 'cuda.use_cuda', default=argparse.SUPPRESS, help='''Set true to use CUDA.''', action='store_true', required=False )
parser.add_argument( '--' + prefix_str + 'subtensor.register.cuda.dev_id', '--' + prefix_str + 'cuda.dev_id', type=int, nargs='+', default=argparse.SUPPRESS, help='''Set the CUDA device id(s). Goes by the order of speed. (i.e. 0 is the fastest).''', required=False )
parser.add_argument('--' + prefix_str + 'subtensor.register.output_in_place', help="Whether to output the registration statistics in-place. Set flag to enable.", action='store_true', required=False, default=bittensor.defaults.subtensor.register.output_in_place)
parser.add_argument('--' + prefix_str + 'subtensor.register.verbose', help="Whether to output the registration statistics verbosely.", action='store_true', required=False, default=bittensor.defaults.subtensor.register.verbose)

## Registration args for CUDA registration.
parser.add_argument( '--' + prefix_str + 'subtensor.register.cuda.use_cuda', '--' + prefix_str + 'cuda', '--' + prefix_str + 'cuda.use_cuda', default=argparse.SUPPRESS, help='''Set flag to use CUDA to register.''', action="store_true", required=False )
parser.add_argument( '--' + prefix_str + 'subtensor.register.cuda.no_cuda', '--' + prefix_str + 'no_cuda', '--' + prefix_str + 'cuda.no_cuda', dest=prefix_str + 'subtensor.register.cuda.use_cuda', default=argparse.SUPPRESS, help='''Set flag to not use CUDA for registration''', action="store_false", required=False )

parser.add_argument( '--' + prefix_str + 'subtensor.register.cuda.dev_id', '--' + prefix_str + 'cuda.dev_id', type=int, nargs='+', default=argparse.SUPPRESS, help='''Set the CUDA device id(s). Goes by the order of speed. (i.e. 0 is the fastest).''', required=False )
parser.add_argument( '--' + prefix_str + 'subtensor.register.cuda.TPB', '--' + prefix_str + 'cuda.TPB', type=int, default=bittensor.defaults.subtensor.register.cuda.TPB, help='''Set the number of Threads Per Block for CUDA.''', required=False )

except argparse.ArgumentError:
@@ -212,20 +218,24 @@ def add_defaults(cls, defaults ):
defaults.subtensor.register = bittensor.Config()
defaults.subtensor.register.num_processes = os.getenv('BT_SUBTENSOR_REGISTER_NUM_PROCESSES') if os.getenv('BT_SUBTENSOR_REGISTER_NUM_PROCESSES') != None else None # uses processor count by default within the function
defaults.subtensor.register.update_interval = os.getenv('BT_SUBTENSOR_REGISTER_UPDATE_INTERVAL') if os.getenv('BT_SUBTENSOR_REGISTER_UPDATE_INTERVAL') != None else 50_000
defaults.subtensor.register.output_in_place = True
defaults.subtensor.register.verbose = False

defaults.subtensor.register.cuda = bittensor.Config()
defaults.subtensor.register.cuda.dev_id = [0]
defaults.subtensor.register.cuda.use_cuda = False
defaults.subtensor.register.cuda.TPB = 256



@staticmethod
def check_config( config: 'bittensor.Config' ):
assert config.subtensor
#assert config.subtensor.network != None
if config.subtensor.get('register') and config.subtensor.register.get('cuda'):
assert all((isinstance(x, int) or isinstance(x, str) and x.isnumeric() ) for x in config.subtensor.register.cuda.get('dev_id', []))

if config.subtensor.register.cuda.get('use_cuda', False):
if config.subtensor.register.cuda.get('use_cuda', bittensor.defaults.subtensor.register.cuda.use_cuda):
try:
import cubit
except ImportError:
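The --cuda / --no_cuda pair added above uses the standard argparse idiom of two flags writing to one dest, both defaulting to argparse.SUPPRESS so that "flag not given" leaves the attribute absent and can later fall back to bittensor.defaults. A reduced sketch of that idiom, with shortened flag names rather than the real prefixed ones:

import argparse

parser = argparse.ArgumentParser()
# Both flags share a destination; SUPPRESS means the attribute is simply
# absent when neither flag is passed.
parser.add_argument('--cuda',    dest='use_cuda', action='store_true',  default=argparse.SUPPRESS)
parser.add_argument('--no_cuda', dest='use_cuda', action='store_false', default=argparse.SUPPRESS)

print(vars(parser.parse_args([])))             # {}                   -> not set, default applies later
print(vars(parser.parse_args(['--cuda'])))     # {'use_cuda': True}
print(vars(parser.parse_args(['--no_cuda'])))  # {'use_cuda': False}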
8 changes: 6 additions & 2 deletions bittensor/_subtensor/subtensor_impl.py
@@ -444,11 +444,13 @@ def register (
wait_for_finalization: bool = True,
prompt: bool = False,
max_allowed_attempts: int = 3,
output_in_place: bool = True,
cuda: bool = False,
dev_id: Union[List[int], int] = 0,
TPB: int = 256,
num_processes: Optional[int] = None,
update_interval: Optional[int] = None,
log_verbose: bool = False,
) -> bool:
r""" Registers the wallet to chain.
Args:
@@ -474,6 +476,8 @@
The number of processes to use to register.
update_interval (int):
The number of nonces to solve between updates.
log_verbose (bool):
If true, the registration process will log more information.
Returns:
success (bool):
flag is true if extrinsic was finalized or included in the block.
@@ -500,9 +504,9 @@
if prompt:
bittensor.__console__.error('CUDA is not available.')
return False
pow_result = bittensor.utils.create_pow( self, wallet, cuda, dev_id, TPB, num_processes=num_processes, update_interval=update_interval )
pow_result = bittensor.utils.create_pow( self, wallet, output_in_place, cuda, dev_id, TPB, num_processes=num_processes, update_interval=update_interval, log_verbose=log_verbose )
else:
pow_result = bittensor.utils.create_pow( self, wallet, num_processes=num_processes, update_interval=update_interval)
pow_result = bittensor.utils.create_pow( self, wallet, output_in_place, num_processes=num_processes, update_interval=update_interval, log_verbose=log_verbose )

# pow failed
if not pow_result:
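With the widened signature above, callers can steer the registration output directly. A hedged usage sketch, where the subtensor and wallet objects are placeholders and the values are illustrative rather than recommended:

# Sketch: invoking register with the new output controls (placeholder objects).
success = subtensor.register(
    wallet = wallet,
    prompt = False,
    cuda = False,               # CPU registration in this example
    dev_id = 0,
    TPB = 256,
    num_processes = None,       # create_pow picks the processor count when None
    update_interval = 50_000,   # nonces to solve between block checks
    output_in_place = True,     # new: rewrite one status line instead of appending
    log_verbose = False,        # new: extra per-attempt logging when True
)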
3 changes: 2 additions & 1 deletion bittensor/_wallet/__init__.py
@@ -22,6 +22,7 @@
import os

import bittensor
from bittensor.utils import strtobool

from . import wallet_impl, wallet_mock

@@ -114,7 +115,7 @@ def add_args(cls, parser: argparse.ArgumentParser, prefix: str = None ):

parser.add_argument('--' + prefix_str + 'wallet.hotkeys', '--' + prefix_str + 'wallet.exclude_hotkeys', required=False, action='store', default=bittensor.defaults.wallet.hotkeys, type=str, nargs='*', help='''Specify the hotkeys by name. (e.g. hk1 hk2 hk3)''')
parser.add_argument('--' + prefix_str + 'wallet.all_hotkeys', required=False, action='store_true', default=bittensor.defaults.wallet.all_hotkeys, help='''To specify all hotkeys. Specifying hotkeys will exclude them from this all.''')
parser.add_argument('--' + prefix_str + 'wallet.reregister', required=False, action='store', default=bittensor.defaults.wallet.reregister, type=bool, help='''Whether to reregister the wallet if it is not already registered.''')
parser.add_argument('--' + prefix_str + 'wallet.reregister', required=False, action='store', default=bittensor.defaults.wallet.reregister, type=strtobool, help='''Whether to reregister the wallet if it is not already registered.''')

except argparse.ArgumentError as e:
pass
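Switching --wallet.reregister from type=bool to type=strtobool matters because bool("false") is True, so any non-empty string used to enable reregistration. The strtobool implementation in bittensor.utils is not shown in this diff; a plausible sketch, assuming it mirrors distutils.util.strtobool but returns a real bool:

def strtobool(val: str) -> bool:
    """Convert a truthy/falsy string to a bool; raise ValueError otherwise."""
    val = val.lower()
    if val in ('y', 'yes', 't', 'true', 'on', '1'):
        return True
    if val in ('n', 'no', 'f', 'false', 'off', '0'):
        return False
    raise ValueError("invalid truth value {!r}".format(val))

# With argparse: --wallet.reregister false  ->  False (type=bool would have given True)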
21 changes: 17 additions & 4 deletions bittensor/_wallet/wallet_impl.py
@@ -246,16 +246,21 @@ def reregister(
if not self.config.wallet.get('reregister'):
sys.exit(0)

subtensor.register(
wallet = self,
self.register(
subtensor = subtensor,
prompt = prompt,
TPB = self.config.subtensor.register.cuda.get('TPB', None),
update_interval = self.config.subtensor.register.cuda.get('update_interval', None),
num_processes = self.config.subtensor.register.get('num_processes', None),
cuda = self.config.subtensor.register.cuda.get('use_cuda', None),
cuda = self.config.subtensor.register.cuda.get('use_cuda', 'NA')
if self.config.subtensor.register.cuda.get('use_cuda', 'NA') != 'NA'
else bittensor.defaults.subtensor.register.cuda.use_cuda
,
dev_id = self.config.subtensor.register.cuda.get('dev_id', None),
wait_for_inclusion = wait_for_inclusion,
wait_for_finalization = wait_for_finalization,
output_in_place = self.config.subtensor.register.get('output_in_place', bittensor.defaults.subtensor.register.output_in_place),
log_verbose = self.config.subtensor.register.get('verbose', bittensor.defaults.subtensor.register.verbose),
)

return self
@@ -272,6 +277,8 @@ def register (
TPB: int = 256,
num_processes: Optional[int] = None,
update_interval: Optional[int] = None,
output_in_place: bool = True,
log_verbose: bool = False,
) -> 'bittensor.Wallet':
""" Registers the wallet to chain.
Args:
@@ -297,6 +304,10 @@
The number of processes to use to register.
update_interval (int):
The number of nonces to solve between updates.
output_in_place (bool):
If true, the registration output is printed in-place.
log_verbose (bool):
If true, the registration output is more verbose.
Returns:
success (bool):
flag is true if extrinsic was finalized or included in the block.
@@ -309,11 +320,13 @@
wait_for_inclusion = wait_for_inclusion,
wait_for_finalization = wait_for_finalization,
prompt=prompt, max_allowed_attempts=max_allowed_attempts,
output_in_place = output_in_place,
cuda=cuda,
dev_id=dev_id,
TPB=TPB,
num_processes=num_processes,
update_interval=update_interval
update_interval=update_interval,
log_verbose=log_verbose,
)

return self
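Note the design choice above: reregister now delegates to self.register instead of calling subtensor.register directly, so config resolution and the new output_in_place / log_verbose parameters are threaded through a single path. A stripped-down sketch of that delegation, with hypothetical classes standing in for the real Wallet and Subtensor:

# Sketch of the delegation pattern; Wallet and the config dict are placeholders.
class Wallet:
    def register(self, subtensor, output_in_place=True, log_verbose=False, **kwargs):
        # Single choke point that forwards everything to the chain call.
        return subtensor.register(wallet=self, output_in_place=output_in_place,
                                  log_verbose=log_verbose, **kwargs)

    def reregister(self, subtensor, config):
        # Resolve config here once, then reuse the register path.
        return self.register(
            subtensor,
            output_in_place=config.get('output_in_place', True),
            log_verbose=config.get('verbose', False),
        )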