commit 8120f2bdf7
Author: Alexia Jolicoeur-Martineau
Date:   2025-10-07 09:26:04 -04:00

39 changed files with 27428 additions and 0 deletions

config/arch/hrm.yaml (new file, 24 lines)

@@ -0,0 +1,24 @@
name: recursive_reasoning.hrm@HierarchicalReasoningModel_ACTV1
loss:
name: losses@ACTLossHead
loss_type: stablemax_cross_entropy
halt_exploration_prob: 0.1
halt_max_steps: 16
H_cycles: 2
L_cycles: 2
H_layers: 4
L_layers: 4
hidden_size: 512
num_heads: 8 # max(2, hidden_size // 64)
expansion: 4
puzzle_emb_ndim: ${.hidden_size}
pos_encodings: rope
forward_dtype: bfloat16
mlp_t: False # use an MLP on the L module instead of a transformer
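
For orientation, the H_cycles / L_cycles / H_layers / L_layers fields describe a two-level recursion: a low-level module of L_layers blocks is applied L_cycles times per high-level step, and a high-level module of H_layers blocks is applied once per H cycle. The snippet below is only a minimal sketch of that loop; the names h_net, l_net, x_emb and the additive conditioning are assumptions, not the repository's actual forward pass.

    import torch
    import torch.nn as nn

    def hrm_forward_sketch(h_net: nn.Module, l_net: nn.Module,
                           z_h: torch.Tensor, z_l: torch.Tensor,
                           x_emb: torch.Tensor,
                           H_cycles: int = 2, L_cycles: int = 2) -> torch.Tensor:
        # z_h, z_l, x_emb: (batch, seq_len, hidden_size)
        for _ in range(H_cycles):
            for _ in range(L_cycles):
                # the low-level module refines z_l, conditioned on z_h and the input embedding
                z_l = l_net(z_l + z_h + x_emb)
            # the high-level module updates z_h from the refined low-level state
            z_h = h_net(z_h + z_l)
        return z_h  # an output head decodes z_h elsewhere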


@@ -0,0 +1,18 @@
name: recursive_reasoning.transformers_baseline@Model_ACTV2
loss:
name: losses@ACTLossHead
loss_type: stablemax_cross_entropy
halt_exploration_prob: 0.1
halt_max_steps: 16
H_cycles: 1 # kept for compatibility
H_layers: 8
hidden_size: 512
num_heads: 12
expansion: 4
puzzle_emb_ndim: ${.hidden_size}
pos_encodings: rope
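
The name: module@Class convention and the ${.hidden_size} reference suggest OmegaConf-style configs, where the architecture class is resolved by splitting the name on '@' and the puzzle embedding width defaults to the sibling hidden_size. A hedged sketch of such a loader follows; load_arch_class is a hypothetical helper, not necessarily the repository's own.

    import importlib
    from omegaconf import OmegaConf

    def load_arch_class(name: str):
        # hypothetical: 'pkg.module@ClassName' -> class object
        module_path, class_name = name.split("@")
        return getattr(importlib.import_module(module_path), class_name)

    cfg = OmegaConf.load("config/arch/hrm.yaml")   # any of the arch configs shown here
    print(cfg.puzzle_emb_ndim)                     # ${.hidden_size} resolves to 512
    ModelCls = load_arch_class(cfg.name)           # e.g. HierarchicalReasoningModel_ACTV1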

config/arch/trm.yaml (new file, 26 lines)

@@ -0,0 +1,26 @@
name: recursive_reasoning.trm@TinyRecursiveReasoningModel_ACTV1
loss:
name: losses@ACTLossHead
loss_type: stablemax_cross_entropy
halt_exploration_prob: 0.1
halt_max_steps: 16
H_cycles: 3
L_cycles: 6
H_layers: 0
L_layers: 2
hidden_size: 512
num_heads: 8 # max(2, hidden_size // 64)
expansion: 4
puzzle_emb_ndim: ${.hidden_size}
pos_encodings: rope
forward_dtype: bfloat16
mlp_t: False # use an MLP on the L module instead of a transformer
puzzle_emb_len: 16 # if non-zero, the puzzle embedding length is set to this value
no_ACT_continue: True # drop the ACT continue loss and use only the sigmoid of the halt logit, which makes much more sense
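
The ACT fields describe the halting behaviour: the model emits a halt logit at each recursion step, training is capped at halt_max_steps, and with probability halt_exploration_prob a minimum number of steps is forced for exploration. With no_ACT_continue: True, only a sigmoid / binary cross-entropy term on the halt logit remains, supervised by whether the current answer is correct. The sketch below illustrates that reading with hypothetical tensor names; it is not the repository's loss head.

    import torch
    import torch.nn.functional as F

    def halt_loss_sketch(q_halt_logits: torch.Tensor, seq_is_correct: torch.Tensor) -> torch.Tensor:
        # q_halt_logits: (batch,) halt logits; seq_is_correct: (batch,) bool targets.
        # Only the halt term is kept; the separate 'continue' target is dropped.
        return F.binary_cross_entropy_with_logits(q_halt_logits, seq_is_correct.float())

    def should_halt_sketch(q_halt_logits: torch.Tensor, step: int,
                           halt_max_steps: int = 16, min_steps: int = 0) -> torch.Tensor:
        # halt when sigmoid(logit) > 0.5 (i.e. logit > 0) or the step budget is exhausted;
        # min_steps is a hypothetical exploration floor drawn with prob. halt_exploration_prob
        halted = (q_halt_logits > 0) | (step >= halt_max_steps)
        return halted & (step >= min_steps)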


@@ -0,0 +1,26 @@
name: recursive_reasoning.trm_hier6@TinyRecursiveReasoningModel_ACTV1
loss:
name: losses@ACTLossHead
loss_type: stablemax_cross_entropy
halt_exploration_prob: 0.1
halt_max_steps: 16
H_cycles: 3
L_cycles: 6
H_layers: 0
L_layers: 2
hidden_size: 512
num_heads: 8 # max(2, hidden_size // 64)
expansion: 4
puzzle_emb_ndim: ${.hidden_size}
pos_encodings: rope
forward_dtype: bfloat16
mlp_t: False # use an MLP on the L module instead of a transformer
puzzle_emb_len: 16 # if non-zero, the puzzle embedding length is set to this value
no_ACT_continue: True # drop the ACT continue loss and use only the sigmoid of the halt logit, which makes much more sense
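
One plausible reading of puzzle_emb_ndim together with puzzle_emb_len: 16 is a learned per-puzzle embedding exposed to the model as 16 prefix tokens of width hidden_size, prepended to the input sequence. The sketch below shows only that reading; the class and field names are assumptions, not the repository's implementation.

    import torch
    import torch.nn as nn

    class PuzzleEmbeddingSketch(nn.Module):
        # hypothetical: one learned vector per puzzle, viewed as puzzle_emb_len prefix tokens
        def __init__(self, num_puzzles: int, hidden_size: int = 512, puzzle_emb_len: int = 16):
            super().__init__()
            self.puzzle_emb_len = puzzle_emb_len
            self.emb = nn.Embedding(num_puzzles, hidden_size * puzzle_emb_len)

        def forward(self, puzzle_ids: torch.Tensor, x_emb: torch.Tensor) -> torch.Tensor:
            # puzzle_ids: (batch,), x_emb: (batch, seq_len, hidden_size)
            prefix = self.emb(puzzle_ids).view(x_emb.size(0), self.puzzle_emb_len, -1)
            return torch.cat([prefix, x_emb], dim=1)  # (batch, puzzle_emb_len + seq_len, hidden_size)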


@@ -0,0 +1,26 @@
name: recursive_reasoning.trm_singlez@TinyRecursiveReasoningModel_ACTV1
loss:
name: losses@ACTLossHead
loss_type: stablemax_cross_entropy
halt_exploration_prob: 0.1
halt_max_steps: 16
H_cycles: 3
L_cycles: 6
H_layers: 0
L_layers: 2
hidden_size: 512
num_heads: 8 # max(2, hidden_size // 64)
expansion: 4
puzzle_emb_ndim: ${.hidden_size}
pos_encodings: rope
forward_dtype: bfloat16
mlp_t: False # use an MLP on the L module instead of a transformer
puzzle_emb_len: 16 # if non-zero, the puzzle embedding length is set to this value
no_ACT_continue: True # drop the ACT continue loss and use only the sigmoid of the halt logit, which makes much more sense
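
mlp_t selects what the L module is built from: per the comment, an MLP replaces the transformer block when it is True. A common way to let an MLP stand in for self-attention is a token-mixing layer that runs over the sequence dimension; the sketch below is that interpretation only, with hypothetical names, not the repository's exact block.

    import torch
    import torch.nn as nn

    class TokenMixingMLPSketch(nn.Module):
        # hypothetical MLP block usable in place of self-attention (one reading of mlp_t: True)
        def __init__(self, seq_len: int, hidden_size: int = 512, expansion: int = 4):
            super().__init__()
            self.norm = nn.LayerNorm(hidden_size)
            self.mix = nn.Sequential(
                nn.Linear(seq_len, seq_len * expansion),
                nn.GELU(),
                nn.Linear(seq_len * expansion, seq_len),
            )

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            # x: (batch, seq_len, hidden_size); transpose so the MLP mixes across positions
            y = self.mix(self.norm(x).transpose(1, 2)).transpose(1, 2)
            return x + y  # residual connection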