Sha256: ccc8d4cb1354a681042a616d5c935f1f42e88c7936fceb1fd4c43e1cef673334
Contents?: true
Size: 1.7 KB
Versions: 16
Compression:
Stored size: 1.7 KB
Contents
# This is an auto-generated partial config. To use it with 'spacy train'
# you can run spacy init fill-config to auto-fill all default settings:
# python -m spacy init fill-config ./base_config.cfg ./config.cfg
[paths]
train = null
dev = null

[system]
gpu_allocator = "pytorch"

[nlp]
lang = "en"
pipeline = ["transformer","textcat"]
batch_size = 128

[components]

[components.transformer]
factory = "transformer"

[components.transformer.model]
@architectures = "spacy-transformers.TransformerModel.v1"
name = "emilyalsentzer/Bio_ClinicalBERT"
tokenizer_config = {"use_fast": true}

[components.transformer.model.get_spans]
@span_getters = "spacy-transformers.strided_spans.v1"
window = 128
stride = 96

[components.textcat]
factory = "textcat"

[components.textcat.model]
@architectures = "spacy.TextCatEnsemble.v2"
nO = null

[components.textcat.model.tok2vec]
@architectures = "spacy-transformers.TransformerListener.v1"
grad_factor = 1.0

[components.textcat.model.tok2vec.pooling]
@layers = "reduce_mean.v1"

[components.textcat.model.linear_model]
@architectures = "spacy.TextCatBOW.v1"
exclusive_classes = true
ngram_size = 1
no_output_layer = false

[corpora]

[corpora.train]
@readers = "spacy.Corpus.v1"
path = ${paths.train}
max_length = 500

[corpora.dev]
@readers = "spacy.Corpus.v1"
path = ${paths.dev}
max_length = 0

[training]
accumulate_gradient = 3
dev_corpus = "corpora.dev"
train_corpus = "corpora.train"

[training.optimizer]
@optimizers = "Adam.v1"

[training.optimizer.learn_rate]
@schedules = "warmup_linear.v1"
warmup_steps = 250
total_steps = 20000
initial_rate = 5e-5

[training.batcher]
@batchers = "spacy.batch_by_padded.v1"
discard_oversize = true
size = 2000
buffer = 256

[initialize]
vectors = null
Version data entries
16 entries across 16 versions & 1 rubygems