Add Llama 3.1 example showcasing optional head-dimension support.

This commit is contained in:
Foke Singh 2025-10-23 17:54:48 +00:00
parent 675cefaf26
commit 7bdce4c078

View File

@ -18,7 +18,7 @@ pub const LlamaLM = struct {
int: u32,
ints: []u32,
}),
head_dim: ?u32,
head_dim: ?u32 = null,
hidden_size: u32,
num_hidden_layers: u32,
num_attention_heads: u32,