llama3-70b-accelerator / config.json
{
  "base_model_name_or_path": "meta-llama/Meta-Llama-3-70B-Instruct",
  "architectures": [
    "MLPSpeculatorPreTrainedModel"
  ],
  "emb_dim": 8192,
  "inner_dim": 8192,
  "model_type": "mlp_speculator",
  "n_candidates": 4,
  "n_predict": 4,
  "scale_input": true,
  "tie_weights": true,
  "top_k_tokens_per_head": [
    4,
    3,
    2,
    2
  ],
  "torch_dtype": "float16",
  "transformers_version": "4.41.2",
  "vocab_size": 128256
}
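
Below is a minimal sketch of loading this file with Python's standard json module and inspecting the speculator settings. The local file path is an assumption (a downloaded copy named config.json), and the field interpretations in the comments are my reading of the MLP speculator design rather than anything stated in the file itself.

import json

# Assumes the config has been downloaded locally as "config.json".
with open("config.json", "r", encoding="utf-8") as f:
    cfg = json.load(f)

# Speculator widths: emb_dim and inner_dim (8192) match the hidden size
# of the Meta-Llama-3-70B-Instruct base model named above.
print(cfg["emb_dim"], cfg["inner_dim"])

# n_predict: the speculator has 4 heads, one per lookahead token.
# top_k_tokens_per_head: candidate tokens kept by each head (4, 3, 2, 2).
# n_candidates: candidate sequences carried forward for verification
# (interpretation based on the MLP speculator approach).
print(cfg["n_predict"], cfg["top_k_tokens_per_head"], cfg["n_candidates"])

# vocab_size (128256) and torch_dtype (float16) mirror the base model's
# tokenizer vocabulary and inference precision.
print(cfg["vocab_size"], cfg["torch_dtype"])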