From 79169c26130a0abd6a937f6e1d2411f83ef37bd0 Mon Sep 17 00:00:00 2001
From: yujun <50394665+JunnYu@users.noreply.github.com>
Date: Tue, 19 Oct 2021 12:14:33 +0800
Subject: [PATCH] update docs

---
 .../ckiplab-bert-base-chinese-ner/README.md |  2 +-
 docs/model_zoo/transformers.rst             | 34 +++++++++++++++----
 2 files changed, 28 insertions(+), 8 deletions(-)

diff --git a/community/junnyu/ckiplab-bert-base-chinese-ner/README.md b/community/junnyu/ckiplab-bert-base-chinese-ner/README.md
index 5b5bbb1da45a..49f895f67da3 100644
--- a/community/junnyu/ckiplab-bert-base-chinese-ner/README.md
+++ b/community/junnyu/ckiplab-bert-base-chinese-ner/README.md
@@ -12,7 +12,7 @@
 import paddle
 import paddle.nn.functional as F
 from paddlenlp.transformers import BertForTokenClassification, BertTokenizer
-path = "ckiplab-bert-base-chinese-ner"
+path = "junnyu/ckiplab-bert-base-chinese-ner"
 model = BertForTokenClassification.from_pretrained(path)
 model.eval()
 tokenizer = BertTokenizer.from_pretrained(path)
diff --git a/docs/model_zoo/transformers.rst b/docs/model_zoo/transformers.rst
index 7c82a88d2948..4f3e723c1700 100644
--- a/docs/model_zoo/transformers.rst
+++ b/docs/model_zoo/transformers.rst
@@ -9,8 +9,8 @@ PaddleNLP provides users with common pretrained models such as ``BERT``, ``ERNIE``, ``ALBERT``, ``RoBER
 Summary of Transformer Pretrained Models
 ------------------------------------
 
-The table below summarizes the pretrained models currently supported by PaddleNLP and their corresponding pretrained weights. We currently provide **21** network architectures and **91** pretrained parameter weights for users,
-including **45** pretrained weights for Chinese language models.
+The table below summarizes the pretrained models currently supported by PaddleNLP and their corresponding pretrained weights. We currently provide **21** network architectures and **96** pretrained parameter weights for users,
+including **48** pretrained weights for Chinese language models.
 
 +--------------------+-----------------------------------------+--------------+-----------------------------------------+
 | Model              | Pretrained Weight                       | Language     | Details of the model                    |
@@ -124,6 +124,31 @@ Summary of Transformer Pretrained Models
 |                    |                                         |              | and Traditional text using              |
 |                    |                                         |              | Whole-Word-Masking with extended data.  |
 |                    +-----------------------------------------+--------------+-----------------------------------------+
+|                    |``junnyu/ckiplab-bert-base-chinese-ner`` | Chinese      | 12-layer, 768-hidden,                   |
+|                    |                                         |              | 12-heads, 102M parameters.              |
+|                    |                                         |              | Finetuned on NER task.                  |
+|                    +-----------------------------------------+--------------+-----------------------------------------+
+|                    |``junnyu/ckiplab-bert-base-chinese-pos`` | Chinese      | 12-layer, 768-hidden,                   |
+|                    |                                         |              | 12-heads, 102M parameters.              |
+|                    |                                         |              | Finetuned on POS task.                  |
+|                    +-----------------------------------------+--------------+-----------------------------------------+
+|                    |``junnyu/ckiplab-bert-base-chinese-ws``  | Chinese      | 12-layer, 768-hidden,                   |
+|                    |                                         |              | 12-heads, 102M parameters.              |
+|                    |                                         |              | Finetuned on WS task.                   |
+|                    +-----------------------------------------+--------------+-----------------------------------------+
+|                    |``junnyu/nlptown-bert-base-``            | Multilingual | 12-layer, 768-hidden,                   |
+|                    |``multilingual-uncased-sentiment``       |              | 12-heads, 167M parameters.              |
+|                    |                                         |              | Finetuned for sentiment analysis on     |
+|                    |                                         |              | product reviews in six languages:       |
+|                    |                                         |              | English, Dutch, German, French,         |
+|                    |                                         |              | Spanish and Italian.                    |
+|                    +-----------------------------------------+--------------+-----------------------------------------+
+|                    |``junnyu/tbs17-MathBERT``                | English      | 12-layer, 768-hidden,                   |
+|                    |                                         |              | 12-heads, 110M parameters.              |
+|                    |                                         |              | Trained on pre-k to graduate math       |
+|                    |                                         |              | language (English) using a masked       |
+|                    |                                         |              | language modeling (MLM) objective.      |
| +| +-----------------------------------------+--------------+-----------------------------------------+ | |``macbert-base-chinese`` | Chinese | 12-layer, 768-hidden, | | | | | 12-heads, 102M parameters. | | | | | Trained with novel MLM as correction | @@ -133,11 +158,6 @@ Transformer预训练模型汇总 | | | | 16-heads, 326M parameters. | | | | | Trained with novel MLM as correction | | | | | pre-training task. | -| +-----------------------------------------+--------------+-----------------------------------------+ -| |``simbert-base-chinese`` | Chinese | 12-layer, 768-hidden, | -| | | | 12-heads, 108M parameters. | -| | | | Trained on 22 million pairs of similar | -| | | | sentences crawed from Baidu Know. | +--------------------+-----------------------------------------+--------------+-----------------------------------------+ |BigBird_ |``bigbird-base-uncased`` | English | 12-layer, 768-hidden, | | | | | 12-heads, _M parameters. |