{ "architectures": [ "GPT" ], "vocab_size": 65536, "n_layer": 24, "n_head": 16, "hidden_size": 2048, "max_position_embeddings": 2048, "model_type": "gpt", "torch_dtype": "bfloat16" }