{ "n_embed": 64, "n_head": 4, "n_layer": 4, "dropout": 0.1, "vocab_size": 22, "block_size": 14, "architectures": ["TinyLLM"], "model_type": "tiny-causal-llm", "_from_model_config": true }