ModelInputConfig
Component: Module
-
class pytext.config.pair_classification.ModelInputConfig [source]
Bases: Module.Config
All Attributes (including base classes)
- load_path: Optional[str] = None
- save_path: Optional[str] = None
- freeze: bool = False
- shared_module_key: Optional[str] = None
- text1: WordEmbedding.Config = WordEmbedding.Config()
- text2: WordEmbedding.Config = WordEmbedding.Config()
Default JSON
{
"load_path": null,
"save_path": null,
"freeze": false,
"shared_module_key": null,
"text1": {
"embed_dim": 100,
"freeze": false,
"embedding_init_strategy": "random",
"embedding_init_range": null,
"export_input_names": [
"tokens_vals"
],
"pretrained_embeddings_path": "",
"vocab_file": "",
"vocab_size": 0,
"vocab_from_train_data": true,
"vocab_from_all_data": false,
"vocab_from_pretrained_embeddings": false,
"lowercase_tokens": true,
"min_freq": 1,
"mlp_layer_dims": []
},
"text2": {
"embed_dim": 100,
"freeze": false,
"embedding_init_strategy": "random",
"embedding_init_range": null,
"export_input_names": [
"tokens_vals"
],
"pretrained_embeddings_path": "",
"vocab_file": "",
"vocab_size": 0,
"vocab_from_train_data": true,
"vocab_from_all_data": false,
"vocab_from_pretrained_embeddings": false,
"lowercase_tokens": true,
"min_freq": 1,
"mlp_layer_dims": []
}
}