Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ Incentivize open source contributions.

## How it Works

Miners register with a fine-grained [GitHub personal access token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens) (PAT) and contribute to whitelisted open source repositories. When their pull requests get merged, validators authenticate account ownership via the PAT, verify the merged contributions, and score them based on code quality, repository weight, and programming language factors. Rewards are distributed proportionally to contribution scores.
Miners register with a fine-grained [GitHub personal access token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens) (PAT) and contribute to whitelisted open source repositories. When their pull requests get merged, validators authenticate account ownership via the PAT, verify the merged contributions, and score them based on code quality and programming language factors. Rewards are allocated by each repository's bounded `emission_share`, then distributed proportionally to contribution scores within that repository.

## Why Gittensor

Expand Down Expand Up @@ -75,11 +75,11 @@ See full guide **[here](https://docs.gittensor.io/validator.html)**

### Important Structures

- Master Repositories & Weights
- Master Repositories & Emission Shares

A list of repositories pulled from GitHub that have been deemed valid for scoring. They each have an associated weight based on factors like: forks, commits, contributors, stars, etc.
A list of repositories pulled from GitHub that have been deemed valid for scoring. They each have an associated `emission_share` that caps how much of the scoring pool that repository can receive in a round.

_NOTE: this list will be dynamic. It will see various audits, additions, deletions, weight changes, and shuffles as the subnet matures._
_NOTE: this list will be dynamic. It will see various audits, additions, deletions, emission share changes, and shuffles as the subnet matures._

_NOTE: don’t be afraid to recommend your favorite open source repositories — the team will review them as possible additions. A repo is more likely to be included if it: provides contributing guidelines, is active/community driven, and provides value/has users._

Expand Down
5 changes: 4 additions & 1 deletion gittensor/classes.py
Original file line number Diff line number Diff line change
Expand Up @@ -234,7 +234,6 @@ def is_pioneer_eligible(self) -> bool:
def calculate_final_earned_score(self) -> float:
"""Combine base score with all multipliers. Pioneer dividend is added separately after."""
multipliers = {
'repo': self.repo_weight_multiplier,
'issue': self.issue_multiplier,
'label': self.label_multiplier,
'spam': self.open_pr_spam_multiplier,
Expand Down Expand Up @@ -289,6 +288,7 @@ class MinerEvaluation:
total_valid_solved_issues: int = 0 # solved issues where solving PR has token_score >= 5
total_closed_issues: int = 0
total_open_issues: int = 0 # mirror-tracked open issues in lookback window (set by issue_discovery.scan)
discovered_issues: List[Issue] = field(default_factory=list)

@property
def total_prs(self) -> int:
Expand Down Expand Up @@ -505,6 +505,7 @@ class CachedEvaluation:
'total_valid_solved_issues',
'total_closed_issues',
'total_open_issues',
'discovered_issues',
)


Expand Down Expand Up @@ -627,6 +628,7 @@ def _build_cache_entry(evaluation: 'MinerEvaluation') -> 'MinerEvaluation':
cached.merged_prs = [_scored_mirror_pr_for_cache(pr) for pr in evaluation.merged_prs]
cached.open_prs = [_scored_mirror_pr_for_cache(pr) for pr in evaluation.open_prs]
cached.closed_prs = [_scored_mirror_pr_for_cache(pr) for pr in evaluation.closed_prs]
cached.discovered_issues = list(evaluation.discovered_issues)
return cached

@staticmethod
Expand All @@ -636,6 +638,7 @@ def _isolate_for_downstream(cached_eval: 'MinerEvaluation') -> 'MinerEvaluation'
# adapters produce fresh Issue objects per call via get_all_issues().
copy_eval = copy.copy(cached_eval)
copy_eval.unique_repos_contributed_to = set(cached_eval.unique_repos_contributed_to)
copy_eval.discovered_issues = list(cached_eval.discovered_issues)
return copy_eval


Expand Down
3 changes: 2 additions & 1 deletion gittensor/cli/miner_commands/score.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,7 @@ def store_or_use_cached_evaluation(self, miner_evaluations: Dict) -> Set[int]:
'merged_prs',
'open_prs',
'closed_prs',
'discovered_issues',
'unique_repos_contributed_to',
}
)
Expand Down Expand Up @@ -280,7 +281,7 @@ async def _run() -> Dict[str, Any]:
issue_rewards = await issue_discovery(
miner_evaluations, master_repositories, programming_languages, token_config, miner_uids
)
rewards = blend_emission_pools(oss_rewards, issue_rewards, miner_uids)
rewards = blend_emission_pools(miner_evaluations, master_repositories, miner_uids)

return {
'success': True,
Expand Down
12 changes: 5 additions & 7 deletions gittensor/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@
# =============================================================================
# Repository & PR Scoring
# =============================================================================
DEFAULT_REPO_WEIGHT = 0.01 # fallback weight for repos not in master_repositories.json
DEFAULT_REPO_EMISSION_SHARE = 0.01 # fallback share for repos not in master_repositories.json
PR_LOOKBACK_DAYS = 35 # rolling window for scoring
MERGED_PR_BASE_SCORE = 25
MIN_TOKEN_SCORE_FOR_BASE_SCORE = 5 # PRs below this get 0 base score
Expand Down Expand Up @@ -154,11 +154,9 @@
# =============================================================================
RECYCLE_UID = 0

# Hardcoded emission splits per competition (replaces dynamic emissions)
OSS_EMISSION_SHARE = 0.30 # 30% to OSS contributions (PR scoring)
ISSUE_DISCOVERY_EMISSION_SHARE = 0.10 # 10% to issue discovery
RECYCLE_EMISSION_SHARE = 0.45 # 45% to recycle UID 0
# ISSUES_TREASURY_EMISSION_SHARE = 0.15 defined below (15% to smart contract treasury)
# Scoring pool is allocated by per-repo emission_share, then split within each
# repo between PR scoring and issue discovery.
OSS_EMISSION_SHARE = 0.90

# =============================================================================
# Spam & Gaming Mitigation
Expand Down Expand Up @@ -187,5 +185,5 @@
# =============================================================================
CONTRACT_ADDRESS = '5FWNdk8YNtNcHKrAx2krqenFrFAZG7vmsd2XN2isJSew3MrD'
ISSUES_TREASURY_UID = 111 # UID of the smart contract neuron, if set to RECYCLE_UID then it's disabled
ISSUES_TREASURY_EMISSION_SHARE = 0.15 # % of emissions allocated to funding issues treasury
ISSUES_TREASURY_EMISSION_SHARE = 0.10 # % of emissions allocated to funding issues treasury
MAX_ISSUE_ID = 1_000_000 # sanity-check upper bound for any real deployment
162 changes: 120 additions & 42 deletions gittensor/validator/forward.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,18 +2,17 @@
# Copyright © 2025 Entrius

import asyncio
from typing import TYPE_CHECKING, Dict, Optional, Set, Tuple
from collections import defaultdict
from typing import TYPE_CHECKING, Dict, Iterable, Optional, Set, Tuple

import bittensor as bt
import numpy as np

from gittensor.classes import MinerEvaluation, MinerEvaluationCache
from gittensor.constants import (
ISSUE_DISCOVERY_EMISSION_SHARE,
ISSUES_TREASURY_EMISSION_SHARE,
ISSUES_TREASURY_UID,
OSS_EMISSION_SHARE,
RECYCLE_EMISSION_SHARE,
RECYCLE_UID,
)
from gittensor.utils.uids import get_all_uids
Expand Down Expand Up @@ -48,11 +47,10 @@ async def forward(self: 'Validator') -> None:
4. Store all evaluations to DB
5. Blend emission pools and update scores

Emission blending (hardcoded per-competition):
- OSS contributions: 30%
- Issue discovery: 30%
- Issue treasury: 15% (flat to UID 111)
- Recycle: 25% (flat to UID 0)
Emission blending:
- OSS scoring pool: 90%, allocated by repository emission_share
- Issue treasury: 10% (flat to UID 111)
- Recycle: registry slack and inactive repo slices
"""

if self.step % VALIDATOR_STEPS_INTERVAL == 0:
Expand All @@ -62,12 +60,12 @@ async def forward(self: 'Validator') -> None:
token_config = load_token_config()

# 1. Score OSS contributions
oss_rewards, miner_evaluations, cached_uids, penalized_uids = await oss_contributions(
_oss_rewards, miner_evaluations, cached_uids, penalized_uids = await oss_contributions(
self, miner_uids, master_repositories, programming_languages, token_config
)

# 2. Score issue discovery
issue_rewards = await issue_discovery(
_issue_rewards = await issue_discovery(
miner_evaluations,
master_repositories,
programming_languages,
Expand All @@ -85,8 +83,8 @@ async def forward(self: 'Validator') -> None:
# 4. Store all evaluations to DB (includes issue discovery fields)
await self.bulk_store_evaluation(miner_evaluations, skip_uids=cached_uids)

# 5. Blend 4 emission pools into final rewards
rewards = blend_emission_pools(oss_rewards, issue_rewards, miner_uids)
# 5. Allocate the scoring pool by per-repo emission_share
rewards = blend_emission_pools(miner_evaluations, master_repositories, miner_uids)

self.update_scores(rewards, miner_uids, blacklisted_uids=sorted(penalized_uids))

Expand Down Expand Up @@ -150,49 +148,129 @@ async def issue_discovery(


def blend_emission_pools(
    miner_evaluations: Dict[int, MinerEvaluation],
    master_repositories: Dict[str, RepositoryConfig],
    miner_uids: set[int],
) -> np.ndarray:
    """Allocate emissions by configured repo slices and route slack to recycle.

    Each repository receives at most ``emission_share * OSS_EMISSION_SHARE``.
    That repo slice is divided proportionally by raw PR and issue-discovery
    scores inside the repo. Registry slack and repo slices with no enabled
    nonzero scorers route to the recycle UID.

    Args:
        miner_evaluations: Per-UID evaluation results holding scored PRs and
            discovered issues.
        master_repositories: Registry of whitelisted repos with their
            ``emission_share`` / ``issue_discovery_share`` configuration.
        miner_uids: UIDs eligible for rewards this round; the returned array
            is indexed by the sorted order of this set.

    Returns:
        A rewards array aligned with ``sorted(miner_uids)``.
    """
    sorted_uids = sorted(miner_uids)
    rewards = np.zeros(len(sorted_uids))

    # Map UID -> position in the rewards array once, instead of repeated
    # list.index() lookups.
    uid_index = {uid: idx for idx, uid in enumerate(sorted_uids)}

    recycle_amount = allocate_repo_scoring_pool(rewards, uid_index, miner_evaluations, master_repositories)

    # Issue treasury (10% flat to UID 111)
    if ISSUES_TREASURY_UID > 0 and ISSUES_TREASURY_UID in miner_uids:
        treasury_idx = uid_index[ISSUES_TREASURY_UID]
        rewards[treasury_idx] += ISSUES_TREASURY_EMISSION_SHARE
        bt.logging.info(
            f'Treasury allocation: UID {ISSUES_TREASURY_UID} receives '
            f'{ISSUES_TREASURY_EMISSION_SHARE * 100:.0f}% of emissions'
        )

    # Recycle receives registry slack plus unclaimed repo slices. There is no
    # fixed recycle baseline under the emission_share allocation model.
    if RECYCLE_UID in miner_uids:
        recycle_idx = uid_index[RECYCLE_UID]
        rewards[recycle_idx] += recycle_amount
        if recycle_amount > 0:
            bt.logging.info(f'Recycling {recycle_amount * 100:.2f}% unclaimed scoring-pool emissions')

    return rewards


def allocate_repo_scoring_pool(
    rewards: np.ndarray,
    uid_index: Dict[int, int],
    miner_evaluations: Dict[int, MinerEvaluation],
    master_repositories: Dict[str, RepositoryConfig],
) -> float:
    """Distribute the OSS scoring pool by repository emission shares.

    Each registry repo gets a slice of ``emission_share * OSS_EMISSION_SHARE``,
    split between PR scoring and issue discovery by ``issue_discovery_share``.
    A side with no positive scores cedes its portion to the other side; a repo
    with no positive scorers at all has its whole slice recycled.

    Returns the amount that should be paid to the recycle UID.
    """
    prs_by_repo, issues_by_repo = _collect_repo_scores(miner_evaluations)

    # Registry shares summing below 1.0 leave part of the pool unassigned;
    # that slack goes straight to recycle.
    total_configured = sum(cfg.emission_share for cfg in master_repositories.values())
    unclaimed = max(0.0, 1.0 - total_configured) * OSS_EMISSION_SHARE
    if unclaimed > 0:
        bt.logging.info(f'Registry emission_share slack: {unclaimed * 100:.2f}% routed to recycle')

    for name, cfg in master_repositories.items():
        # NOTE(review): shares are assumed to sum to <= 1.0; if the registry
        # ever over-allocates, repos would collectively exceed the pool.
        slice_amount = cfg.emission_share * OSS_EMISSION_SHARE
        if slice_amount <= 0:
            continue

        key = name.lower()
        pr_pairs = prs_by_repo.get(key, [])
        issue_pairs = issues_by_repo.get(key, [])
        pr_sum = sum(value for _, value in pr_pairs)
        issue_sum = sum(value for _, value in issue_pairs)

        frac_issues = cfg.issue_discovery_share
        frac_prs = 1.0 - frac_issues
        has_prs = frac_prs > 0 and pr_sum > 0
        has_issues = frac_issues > 0 and issue_sum > 0

        if has_prs and has_issues:
            _distribute_entries(rewards, uid_index, pr_pairs, slice_amount * frac_prs, pr_sum)
            _distribute_entries(rewards, uid_index, issue_pairs, slice_amount * frac_issues, issue_sum)
        elif has_prs:
            _distribute_entries(rewards, uid_index, pr_pairs, slice_amount, pr_sum)
        elif has_issues:
            _distribute_entries(rewards, uid_index, issue_pairs, slice_amount, issue_sum)
        else:
            # No enabled, nonzero scorers for this repo: recycle its slice.
            unclaimed += slice_amount

    return unclaimed


def _collect_repo_scores(
    miner_evaluations: Dict[int, MinerEvaluation],
) -> Tuple[Dict[str, list[Tuple[int, float]]], Dict[str, list[Tuple[int, float]]]]:
    """Group positive scores by lower-cased repository full name.

    Returns two mappings of repo key -> list of (uid, score) pairs: the first
    built from merged-PR earned scores, the second from issue-discovery
    earned scores.
    """
    by_repo_prs: Dict[str, list[Tuple[int, float]]] = defaultdict(list)
    by_repo_issues: Dict[str, list[Tuple[int, float]]] = defaultdict(list)

    for miner_uid, evaluation in miner_evaluations.items():
        for merged_pr in _positive_pr_scores(evaluation):
            repo_key = merged_pr.repository_full_name.lower()
            by_repo_prs[repo_key].append((miner_uid, float(merged_pr.earned_score)))
        for discovered in _positive_issue_scores(evaluation):
            repo_key = discovered.repository_full_name.lower()
            by_repo_issues[repo_key].append((miner_uid, float(discovered.discovery_earned_score)))

    return by_repo_prs, by_repo_issues


def _positive_pr_scores(evaluation: MinerEvaluation) -> Iterable:
    """Yield the merged PRs on ``evaluation`` with a strictly positive ``earned_score``."""
    merged = evaluation.merged_prs
    return (candidate for candidate in merged if getattr(candidate, 'earned_score', 0.0) > 0)


def _positive_issue_scores(evaluation: MinerEvaluation) -> Iterable:
    """Yield discovered issues with a strictly positive ``discovery_earned_score``.

    Uses getattr defaults so evaluations cached before the
    ``discovered_issues`` field existed are treated as having none.
    """
    candidates = getattr(evaluation, 'discovered_issues', [])
    return (item for item in candidates if getattr(item, 'discovery_earned_score', 0.0) > 0)


def _distribute_entries(
    rewards: np.ndarray,
    uid_index: Dict[int, int],
    entries: list[Tuple[int, float]],
    allocation: float,
    total_score: float,
) -> None:
    """Split ``allocation`` across ``entries`` in proportion to each entry's score.

    Mutates ``rewards`` in place. Entries whose UID is missing from
    ``uid_index`` are skipped; nothing is paid out when the allocation or the
    score total is non-positive.
    """
    if allocation <= 0 or total_score <= 0:
        return
    for miner_uid, raw_score in entries:
        position = uid_index.get(miner_uid)
        if position is not None:
            rewards[position] += allocation * raw_score / total_score
8 changes: 5 additions & 3 deletions gittensor/validator/issue_discovery/scan.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,6 @@
LanguageConfig,
RepositoryConfig,
TokenConfig,
resolve_repo_weight,
)


Expand Down Expand Up @@ -224,6 +223,7 @@ def _clear_issue_discovery_fields(evaluation: MinerEvaluation) -> None:
evaluation.total_valid_solved_issues = 0
evaluation.total_closed_issues = 0
evaluation.total_open_issues = 0
evaluation.discovered_issues = []


def _copy_issue_discovery_fields(target: MinerEvaluation, source: MinerEvaluation) -> None:
Expand All @@ -235,6 +235,7 @@ def _copy_issue_discovery_fields(target: MinerEvaluation, source: MinerEvaluatio
target.total_valid_solved_issues = source.total_valid_solved_issues
target.total_closed_issues = source.total_closed_issues
target.total_open_issues = source.total_open_issues
target.discovered_issues = list(source.discovered_issues)


def _restore_issue_discovery_from_cache(
Expand Down Expand Up @@ -435,6 +436,7 @@ async def _score_miner_issues(
evaluation.total_closed_issues = closed_count
evaluation.total_open_issues = open_issue_count
evaluation.issue_token_score = round(issue_token_score, 2)
evaluation.discovered_issues = []

is_eligible, credibility, reason = check_issue_eligibility(solved_count, valid_solved_count, closed_count)
evaluation.is_issue_eligible = is_eligible
Expand All @@ -456,7 +458,6 @@ async def _score_miner_issues(
issue.discovery_open_issue_spam_multiplier = spam_mult
issue.discovery_earned_score = round(
issue.discovery_base_score
* issue.discovery_repo_weight_multiplier
* issue.discovery_time_decay_multiplier
* issue.discovery_review_quality_multiplier
* issue.discovery_credibility_multiplier
Expand All @@ -465,6 +466,7 @@ async def _score_miner_issues(
)
total_discovery_score += issue.discovery_earned_score

evaluation.discovered_issues = scored_issues
evaluation.issue_discovery_score = round(total_discovery_score, 2)

bt.logging.info(
Expand Down Expand Up @@ -620,7 +622,7 @@ def _mirror_issue_for_scoring(
)

adapted.discovery_base_score = base_score
adapted.discovery_repo_weight_multiplier = resolve_repo_weight(repo_config)
adapted.discovery_repo_weight_multiplier = 1.0
adapted.discovery_time_decay_multiplier = round(calculate_time_decay(solving_pr.merged_at), 2)
adapted.discovery_review_quality_multiplier = round(
calculate_issue_review_quality_multiplier(solving_pr.review_summary.maintainer_changes_requested_count),
Expand Down
Loading