
Subtensor and Normalization updates #936

Merged
merged 33 commits into nobunaga from normalization_fixes
Oct 11, 2022
Changes from all commits (33 commits)
aeb2d2b
local train bug fix
Eugene-hu Sep 6, 2022
15775e0
normalization update
Eugene-hu Sep 7, 2022
9a640d4
fix tests
Eugene-hu Sep 7, 2022
25559fd
Merge branch 'nobunaga' into normalization_fixes
Eugene-hu Sep 7, 2022
f3ffece
remove test
Eugene-hu Sep 7, 2022
183b153
updated normalization
Eugene-hu Sep 8, 2022
6ad417d
Naming changes, bug fixes
Eugene-hu Sep 10, 2022
23d5b3d
Merge branch 'nobunaga' into normalization_fixes
Eugene-hu Sep 12, 2022
4849e55
subtensor update for max clip
Eugene-hu Sep 10, 2022
a5a02e0
max weight to a million
Eugene-hu Sep 14, 2022
25ec064
Merge branch 'nobunaga' into normalization_fixes
Eugene-hu Sep 14, 2022
986fdd3
Merge branch 'nobunaga' into normalization_fixes
Eugene-hu Sep 14, 2022
f09c328
Fixes for ordering and comments
Eugene-hu Sep 14, 2022
e69f4da
additional tests
Eugene-hu Sep 14, 2022
f9700ee
string fix
Eugene-hu Sep 15, 2022
846d6fb
numerical stability and testing updates
Eugene-hu Sep 15, 2022
671df54
minor update for division by zero
Eugene-hu Sep 15, 2022
1ed517a
Naming and spacing fixes
Eugene-hu Sep 15, 2022
013385a
Merge branch 'nobunaga' into normalization_fixes
Eugene-hu Sep 15, 2022
c50a1b0
Merge branch 'normalization_fixes' of https://github.com/opentensor/b…
Eugene-hu Sep 15, 2022
cff539b
epsilon update
Eugene-hu Sep 19, 2022
499b24c
Merge branch 'nobunaga' into normalization_fixes
Eugene-hu Sep 20, 2022
07dce9b
Merge branch 'nobunaga' into normalization_fixes
Eugene-hu Sep 21, 2022
4fdd30c
Merge branch 'nobunaga' into normalization_fixes
Eugene-hu Sep 23, 2022
ad9fe5d
small fix
Eugene-hu Sep 23, 2022
411538d
Merge branch 'nobunaga' into normalization_fixes
Eugene-hu Sep 23, 2022
b0b56b5
additional subtensor parameters
Eugene-hu Sep 27, 2022
66360e3
remove print
Eugene-hu Oct 3, 2022
ecc751f
Merge branch 'nobunaga' into normalization_fixes
Eugene-hu Oct 4, 2022
8f6ebd4
Merge branch 'nobunaga' into normalization_fixes
Eugene-hu Oct 6, 2022
8bf2c56
help string fixes
Eugene-hu Oct 11, 2022
1826541
Merge branch 'normalization_fixes' of https://github.com/opentensor/b…
Eugene-hu Oct 11, 2022
1e04a23
Merge branch 'nobunaga' into normalization_fixes
Eugene-hu Oct 11, 2022
43 changes: 25 additions & 18 deletions bittensor/_neuron/text/core_validator/__init__.py
@@ -237,7 +237,7 @@ def add_args( cls, parser ):
parser.add_argument('--neuron.wait_for_finalization', action='store_true', help='''when setting weights the miner waits for transaction finalization.''', default=False)
parser.add_argument('--neuron.forward_num', type=int, help='''How many forward requests before a backward call.''', default=3)
parser.add_argument('--neuron.validation_synapse', type=str, help='''Synapse used for validation.''', default='TextCausalLMNext', choices = ['TextCausalLMNext', 'TextCausalLM'])
parser.add_argument('--neuron.exclude_quantile', type=float, help='Exclude the lowest quantile from weight setting.', default=0.1)
parser.add_argument('--neuron.exclude_quantile', type=float, help='Exclude the lowest quantile from weight setting. (default value: -1, pulling from subtensor directly)', default=-1)

@classmethod
def config ( cls ):
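
The new default of -1 acts as a sentinel: when the flag is left at its default, the validator falls back to the quantile stored on chain (subtensor.validator_exclude_quantile). A minimal sketch of that resolution pattern, using hypothetical names (config_value, chain_value) rather than the validator's actual objects:

def resolve_exclude_quantile(config_value: float, chain_value: float) -> float:
    # -1 is the sentinel meaning "not set on the command line";
    # any other value is an explicit operator override.
    return chain_value if config_value == -1 else config_value

# Hypothetical values: the chain default of 0.1 is used unless
# --neuron.exclude_quantile was passed explicitly.
assert resolve_exclude_quantile(-1, 0.1) == 0.1
assert resolve_exclude_quantile(0.2, 0.1) == 0.2
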
@@ -388,7 +388,7 @@ def run_epoch( self ):
sequence_length = self.subtensor.validator_sequence_length
validation_len = self.config.neuron.validation_len # Number of tokens to holdout for phrase validation beyond sequence context
min_allowed_weights = self.subtensor.min_allowed_weights
max_allowed_ratio = self.subtensor.max_allowed_min_max_ratio
max_weight_limit = self.subtensor.max_weight_limit
blocks_per_epoch = self.subtensor.validator_epoch_length if self.config.neuron.blocks_per_epoch == -1 else self.config.neuron.blocks_per_epoch
epochs_until_reset = self.subtensor.validator_epochs_per_reset if self.config.neuron.epochs_until_reset == -1 else self.config.neuron.epochs_until_reset

@@ -410,7 +410,7 @@ def run_epoch( self ):
if self.config.using_wandb:
wandb.log({'era/batch_size': batch_size, 'era/sequence_length': sequence_length,
'era/validation_len': validation_len,
'era/min_allowed_weights': min_allowed_weights, 'era/max_allowed_ratio': max_allowed_ratio,
'era/min_allowed_weights': min_allowed_weights, 'era/max_weight_limit': max_weight_limit,
'era/blocks_per_epoch': blocks_per_epoch, 'era/epochs_until_reset': epochs_until_reset},
step=current_block)

@@ -572,8 +572,8 @@ def run_epoch( self ):
f'[dim]weights[/dim] sum:{sample_weights.sum().item():.2g} '
f'[white] max:[bold]{sample_weights.max().item():.4g}[/bold] / '
f'min:[bold]{sample_weights.min().item():.4g}[/bold] [/white] '
f'\[{sample_weights.max().item() / sample_weights.min().item():.1f}:1] '
f'({max_allowed_ratio} allowed)')
f'\[{sample_weights.max().item()}:1] '
f'({max_weight_limit} allowed)')

self.subtensor.set_weights(
uids=sample_uids.detach().to('cpu'),
@@ -678,7 +678,7 @@ def calculate_weights(self, responsive_uids: Set, queried_uids: Set):

# === Randomize UIDs in preferred order (responsive -> queried -> rest) ===
min_allowed_weights = self.subtensor.min_allowed_weights
max_allowed_ratio = self.subtensor.max_allowed_min_max_ratio
max_weight_limit = self.subtensor.max_weight_limit

non_responsive_uids = queried_uids - responsive_uids
non_queried_uids = set(range(self.metagraph.n)) - queried_uids
@@ -708,32 +708,35 @@ def calculate_weights(self, responsive_uids: Set, queried_uids: Set):
sample_uids = preferred_uids[:weights_to_set] # slice to weights_to_set
sample_weights = neuron_weights[:weights_to_set] # slice to weights_to_set

logger.info(f'{len(sample_weights)} Shapley values | min:{sample_weights.min()} max:{sample_weights.max()}')
# === If no uids respond, return ===
if len(sample_uids) == 0:
return sample_uids, sample_weights

# === Exclude lowest quantile from weight setting ===
max_exclude = (len(sample_weights) - min_allowed_weights) / len(sample_weights) # max excludable weight quantile
quantile = self.subtensor.validator_exclude_quantile if self.config.neuron.exclude_quantile == -1 else self.config.neuron.exclude_quantile
if 0 < max_exclude:
exclude_quantile = min([self.config.neuron.exclude_quantile, max_exclude]) # reduce quantile to meet min_allowed_weights
exclude_quantile = min([quantile , max_exclude]) # reduce quantile to meet min_allowed_weights
lowest_quantile = sample_weights.quantile(exclude_quantile) # find lowest quantile threshold
sample_uids = sample_uids[lowest_quantile <= sample_weights] # exclude uids with weights below quantile
sample_weights = sample_weights[lowest_quantile <= sample_weights] # exclude weights below quantile

logger.info(f'Exclude {exclude_quantile} quantile ({lowest_quantile}) | '
f'{len(sample_weights)} Shapley values | min:{sample_weights.min()} max:{sample_weights.max()}')

# === Normalize and apply max_allowed_ratio ===
sample_weights = bittensor.utils.weight_utils.normalize_max_multiple(x=sample_weights,
multiple=max_allowed_ratio)
logger.info(f'{len(sample_weights)} normalize_max_multiple | '
f'min:{sample_weights.min()} max:{sample_weights.max()}')
# === Normalize and apply max_weight_limit ===
sample_weights = bittensor.utils.weight_utils.normalize_max_weight(x=sample_weights,
limit=max_weight_limit)
logger.info(f'{len(sample_weights)} normalize_max_weight | '
f'max:{sample_weights.max()}')

return sample_uids, sample_weights

def weights_table(self, sample_uids, sample_weights, include_uids=None, num_rows: int = None):
r""" Prints weights table given sample_uids and sample_weights.
"""
min_allowed_weights = self.subtensor.min_allowed_weights
max_allowed_ratio = self.subtensor.max_allowed_min_max_ratio
max_weight_limit = self.subtensor.max_weight_limit

# === Weight table ===
# Prints exponential moving average statistics of valid neurons and latest weights
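
The quantile exclusion in calculate_weights above drops the weakest scores while never excluding so many that fewer than min_allowed_weights remain. A hedged sketch of that clamping logic, assuming PyTorch tensors as in the validator; it illustrates the idea rather than reproducing the validator code exactly:

import torch

def exclude_lowest_quantile(uids: torch.Tensor, weights: torch.Tensor,
                            quantile: float, min_allowed_weights: int):
    # Largest excludable fraction that still leaves min_allowed_weights entries.
    max_exclude = max(len(weights) - min_allowed_weights, 0) / len(weights)
    if max_exclude <= 0:
        return uids, weights
    q = min(quantile, max_exclude)     # shrink the requested quantile if needed
    threshold = weights.quantile(q)    # lowest admissible weight
    keep = threshold <= weights
    return uids[keep], weights[keep]

# Example: with 5 scores and min_allowed_weights=3, a requested 0.5 quantile
# is clamped to 0.4, so at least 3 weights survive.
uids = torch.arange(5)
weights = torch.tensor([0.05, 0.10, 0.20, 0.30, 0.35])
print(exclude_lowest_quantile(uids, weights, quantile=0.5, min_allowed_weights=3))
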
@@ -763,8 +766,8 @@ def weights_table(self, sample_uids, sample_weights, include_uids=None, num_rows
f'sum:{sample_weights.sum().item():.2g} '
f'[white] max:[bold]{sample_weights.max().item():.4g}[/bold] / '
f'min:[bold]{sample_weights.min().item():.4g}[/bold] [/white] '
f'\[{sample_weights.max().item() / sample_weights.min().item():.1f}:1] '
f'({max_allowed_ratio} allowed)', # caption
f'\[{sample_weights.max().item()}:1] '
f'({max_weight_limit} allowed)', # caption
mark_uids=avail_include_uids)


Expand All @@ -774,6 +777,10 @@ class nucleus( torch.nn.Module ):
def __init__( self, config, device, subtensor ):
super(nucleus, self).__init__()
self.config = config

self.config.nucleus.scaling_law_power = subtensor.scaling_law_power if self.config.nucleus.scaling_law_power == -1 else self.config.nucleus.scaling_law_power
self.config.nucleus.synergy_scaling_law_power = subtensor.synergy_scaling_law_power if self.config.nucleus.synergy_scaling_law_power == -1 else self.config.nucleus.synergy_scaling_law_power

self.device = device
self.max_n = subtensor.max_n
self.permute_uids = [] # iterable of next UIDs to query, reset to permuted UIDs when empty
@@ -818,8 +825,8 @@ def add_args( cls, parser ):
parser.add_argument('--nucleus.importance', type=float, help='hyperparameter for the importance loss', default=3)
parser.add_argument('--nucleus.noise_multiplier', type=float, help='Standard deviation multiplier on weights', default=2 )
parser.add_argument('--nucleus.dendrite_backward', action='store_true', help='Pass backward request to the server side or not', default=False )
parser.add_argument('--nucleus.scaling_law_power', type=float, help='Power for modified scaling law, powered down to improve dynamic range, e.g. 3 → 6 nats for 0.5.', default=0.5)
parser.add_argument('--nucleus.synergy_scaling_law_power', type=float, help='Power for synergy modified scaling law, powered down to improve dynamic range, e.g. 3 → 6 nats for 0.5.', default=0.6)
parser.add_argument('--nucleus.scaling_law_power', type=float, help='Power for modified scaling law, powered down to improve dynamic range, e.g. 3 → 6 nats for 0.5. (default value: -1, pulling from subtensor directly)', default=-1)
parser.add_argument('--nucleus.synergy_scaling_law_power', type=float, help='Power for synergy modified scaling law, powered down to improve dynamic range, e.g. 3 → 6 nats for 0.5. (default value: -1, pulling from subtensor directly)', default=-1)

@classmethod
def config ( cls ):
86 changes: 71 additions & 15 deletions bittensor/_subtensor/subtensor_impl.py
@@ -144,7 +144,7 @@ def rho (self) -> int:
@retry(delay=2, tries=3, backoff=2, max_delay=4)
def make_substrate_call_with_retry():
with self.substrate as substrate:
return substrate.query( module='SubtensorModule', storage_function = 'Rho').value
return substrate.query( module='SubtensorModule', storage_function = 'Rho' ).value
return make_substrate_call_with_retry()

@property
@@ -157,7 +157,7 @@ def kappa (self) -> int:
@retry(delay=2, tries=3, backoff=2, max_delay=4)
def make_substrate_call_with_retry():
with self.substrate as substrate:
return substrate.query( module='SubtensorModule', storage_function = 'Kappa').value
return substrate.query( module='SubtensorModule', storage_function = 'Kappa' ).value
return make_substrate_call_with_retry()

@property
@@ -170,7 +170,7 @@ def difficulty (self) -> int:
@retry(delay=2, tries=3, backoff=2, max_delay=4)
def make_substrate_call_with_retry():
with self.substrate as substrate:
return substrate.query( module='SubtensorModule', storage_function = 'Difficulty').value
return substrate.query( module='SubtensorModule', storage_function = 'Difficulty' ).value
return make_substrate_call_with_retry()

@property
@@ -196,7 +196,7 @@ def immunity_period (self) -> int:
@retry(delay=2, tries=3, backoff=2, max_delay=4)
def make_substrate_call_with_retry():
with self.substrate as substrate:
return substrate.query( module='SubtensorModule', storage_function = 'ImmunityPeriod').value
return substrate.query( module='SubtensorModule', storage_function = 'ImmunityPeriod' ).value
return make_substrate_call_with_retry()

@property
@@ -209,7 +209,7 @@ def validator_batch_size (self) -> int:
@retry(delay=2, tries=3, backoff=2, max_delay=4)
def make_substrate_call_with_retry():
with self.substrate as substrate:
return substrate.query( module='SubtensorModule', storage_function = 'ValidatorBatchSize').value
return substrate.query( module='SubtensorModule', storage_function = 'ValidatorBatchSize' ).value
return make_substrate_call_with_retry()


@@ -223,7 +223,7 @@ def validator_sequence_length (self) -> int:
@retry(delay=2, tries=3, backoff=2, max_delay=4)
def make_substrate_call_with_retry():
with self.substrate as substrate:
return substrate.query( module='SubtensorModule', storage_function = 'ValidatorSequenceLength').value
return substrate.query( module='SubtensorModule', storage_function = 'ValidatorSequenceLength' ).value
return make_substrate_call_with_retry()

@property
@@ -236,7 +236,7 @@ def validator_epochs_per_reset (self) -> int:
@retry(delay=2, tries=3, backoff=2, max_delay=4)
def make_substrate_call_with_retry():
with self.substrate as substrate:
return substrate.query( module='SubtensorModule', storage_function = 'ValidatorEpochsPerReset').value
return substrate.query( module='SubtensorModule', storage_function = 'ValidatorEpochsPerReset' ).value
return make_substrate_call_with_retry()

@property
@@ -249,7 +249,7 @@ def validator_epoch_length (self) -> int:
@retry(delay=2, tries=3, backoff=2, max_delay=4)
def make_substrate_call_with_retry():
with self.substrate as substrate:
return substrate.query( module='SubtensorModule', storage_function = 'ValidatorEpochLen').value
return substrate.query( module='SubtensorModule', storage_function = 'ValidatorEpochLen' ).value
return make_substrate_call_with_retry()

@property
@@ -262,7 +262,7 @@ def total_stake (self) -> 'bittensor.Balance':
@retry(delay=2, tries=3, backoff=2, max_delay=4)
def make_substrate_call_with_retry():
with self.substrate as substrate:
return bittensor.Balance.from_rao( substrate.query( module='SubtensorModule', storage_function = 'TotalStake').value )
return bittensor.Balance.from_rao( substrate.query( module='SubtensorModule', storage_function = 'TotalStake' ).value )
return make_substrate_call_with_retry()

@property
@@ -275,7 +275,63 @@ def min_allowed_weights (self) -> int:
@retry(delay=2, tries=3, backoff=2, max_delay=4)
def make_substrate_call_with_retry():
with self.substrate as substrate:
return substrate.query( module='SubtensorModule', storage_function = 'MinAllowedWeights').value
return substrate.query( module='SubtensorModule', storage_function = 'MinAllowedWeights' ).value
return make_substrate_call_with_retry()

@property
def max_weight_limit (self) -> int:
r""" Returns MaxWeightLimit
Returns:
max_weight (int):
the max value for weights after normalization
"""
@retry(delay=2, tries=3, backoff=2, max_delay=4)
def make_substrate_call_with_retry():
with self.substrate as substrate:
U32_MAX = 4294967295
return substrate.query( module='SubtensorModule', storage_function = 'MaxWeightLimit' ).value/U32_MAX
return make_substrate_call_with_retry()

@property
def scaling_law_power (self) -> int:
r""" Returns ScalingLawPower
Returns:
ScalingLawPower (float):
the power term attached to scaling law
"""
@retry(delay=2, tries=3, backoff=2, max_delay=4)
def make_substrate_call_with_retry():
with self.substrate as substrate:
MAX = 100
return substrate.query( module='SubtensorModule', storage_function = 'ScalingLawPower' ).value/MAX
return make_substrate_call_with_retry()

@property
def synergy_scaling_law_power (self) -> int:
r""" Returns SynergyScalingLawPower
Returns:
SynergyScalingLawPower (float):
the term attached to synergy calculation during shapley scores
"""
@retry(delay=2, tries=3, backoff=2, max_delay=4)
def make_substrate_call_with_retry():
with self.substrate as substrate:
MAX = 100
return substrate.query( module='SubtensorModule', storage_function = 'SynergyScalingLawPower' ).value/MAX
return make_substrate_call_with_retry()

@property
def validator_exclude_quantile (self) -> int:
r""" Returns ValidatorExcludeQuantile
Returns:
ValidatorExcludeQuantile (float):
the quantile that validators should exclude when setting their weights
"""
@retry(delay=2, tries=3, backoff=2, max_delay=4)
def make_substrate_call_with_retry():
with self.substrate as substrate:
MAX = 100
return substrate.query( module='SubtensorModule', storage_function = 'ValidatorExcludeQuantile' ).value/MAX
return make_substrate_call_with_retry()

@property
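
The new properties read raw integers from chain storage and scale them into floats on the client side: MaxWeightLimit is stored as a u32 and divided by U32_MAX, while the percent-style parameters (ScalingLawPower, SynergyScalingLawPower, ValidatorExcludeQuantile) are divided by 100. A small sketch of that arithmetic with made-up raw values, not actual chain state:

U32_MAX = 4294967295  # 2**32 - 1

def u32_to_fraction(raw: int) -> float:
    # Map a u32 stored on chain into [0, 1].
    return raw / U32_MAX

def percent_to_fraction(raw: int) -> float:
    # Map a 0-100 integer stored on chain into [0, 1].
    return raw / 100

print(u32_to_fraction(4294967295))  # 1.0   -> a single weight may take all of the mass
print(u32_to_fraction(42949672))    # ~0.01 -> max normalized weight of about 1%
print(percent_to_fraction(50))      # 0.5   -> e.g. a ScalingLawPower of 0.5
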
@@ -288,7 +344,7 @@ def max_allowed_min_max_ratio(self) -> int:
@retry(delay=2, tries=3, backoff=2, max_delay=4)
def make_substrate_call_with_retry():
with self.substrate as substrate:
return substrate.query( module='SubtensorModule', storage_function = 'MaxAllowedMaxMinRatio').value
return substrate.query( module='SubtensorModule', storage_function = 'MaxAllowedMaxMinRatio' ).value
return make_substrate_call_with_retry()

@property
@@ -301,7 +357,7 @@ def n (self) -> int:
@retry(delay=2, tries=3, backoff=2, max_delay=4)
def make_substrate_call_with_retry():
with self.substrate as substrate:
return substrate.query( module='SubtensorModule', storage_function = 'N').value
return substrate.query( module='SubtensorModule', storage_function = 'N' ).value
return make_substrate_call_with_retry()

@property
@@ -314,7 +370,7 @@ def max_n (self) -> int:
@retry(delay=2, tries=3, backoff=2, max_delay=4)
def make_substrate_call_with_retry():
with self.substrate as substrate:
return substrate.query( module='SubtensorModule', storage_function = 'MaxAllowedUids').value
return substrate.query( module='SubtensorModule', storage_function = 'MaxAllowedUids' ).value
return make_substrate_call_with_retry()

@property
@@ -336,7 +392,7 @@ def blocks_since_epoch (self) -> int:
@retry(delay=2, tries=3, backoff=2, max_delay=4)
def make_substrate_call_with_retry():
with self.substrate as substrate:
return substrate.query( module='SubtensorModule', storage_function = 'BlocksSinceLastStep').value
return substrate.query( module='SubtensorModule', storage_function = 'BlocksSinceLastStep' ).value
return make_substrate_call_with_retry()

@property
@@ -349,7 +405,7 @@ def blocks_per_epoch (self) -> int:
@retry(delay=2, tries=3, backoff=2, max_delay=4)
def make_substrate_call_with_retry():
with self.substrate as substrate:
return substrate.query( module='SubtensorModule', storage_function = 'BlocksPerStep').value
return substrate.query( module='SubtensorModule', storage_function = 'BlocksPerStep' ).value
return make_substrate_call_with_retry()

def get_n (self, block: int = None) -> int:
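
Every property above wraps its storage query in the same local helper decorated with @retry(delay=2, tries=3, backoff=2, max_delay=4), so transient RPC failures are retried with exponential backoff. A stripped-down sketch of that pattern, assuming the retry package and a substrate connection like the one subtensor_impl already manages:

from retry import retry

def query_chain_constant(substrate, storage_function: str):
    # Query a SubtensorModule storage item, retrying transient failures.
    # `substrate` is assumed to be a substrate-interface connection
    # managed elsewhere, as in Subtensor.substrate.
    @retry(delay=2, tries=3, backoff=2, max_delay=4)
    def make_substrate_call_with_retry():
        with substrate as sub:
            return sub.query(module='SubtensorModule',
                             storage_function=storage_function).value
    return make_substrate_call_with_retry()

# e.g. query_chain_constant(self.substrate, 'MaxWeightLimit')
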