
Commit a943a0d

Merge pull request #1 from modudeepnlp/feature/init
Feature/init
2 parents 7a9aae1 + 615e8e3 commit a943a0d

39 files changed: +6132 -2 lines

.gitignore

Lines changed: 12 additions & 2 deletions
@@ -125,7 +125,7 @@ dmypy.json
 tensorflow_code
 
 # Models
-models
+transformers_ace/MODELS
 proc_data
 
 # examples
@@ -137,4 +137,14 @@ examples/runs
 serialization_dir
 
 # emacs
-*.*~
+*.*~
+
+# output directories
+transformers_ace/CORPUS/
+transformers_ace/DATA/
+transformers_ace/MODELS/
+transformers_ace/MODELS_TF/
+transformers_ace/SUMMARY/
+transformers_ace/TASK_DATA/
+transformers_ace/TASK_RESULTS/
+
transformers/configuration_albert.py

Lines changed: 101 additions & 0 deletions
@@ -0,0 +1,101 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" ALBERT model configuration """

from .configuration_utils import PretrainedConfig

ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'albert-base-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-config.json",
    'albert-large-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-config.json",
    'albert-xlarge-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-config.json",
    'albert-xxlarge-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-config.json",
    'albert-base-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v2-config.json",
    'albert-large-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v2-config.json",
    'albert-xlarge-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v2-config.json",
    'albert-xxlarge-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v2-config.json",
}


class AlbertConfig(PretrainedConfig):
    """Configuration for `AlbertModel`.

    The default settings match the configuration of model `albert_xxlarge`.
    """

    pretrained_config_archive_map = ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self,
                 vocab_size_or_config_json_file=30000,
                 embedding_size=128,
                 hidden_size=4096,
                 num_hidden_layers=12,
                 num_hidden_groups=1,
                 num_attention_heads=64,
                 intermediate_size=16384,
                 inner_group_num=1,
                 hidden_act="gelu_new",
                 hidden_dropout_prob=0,
                 attention_probs_dropout_prob=0,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02,
                 layer_norm_eps=1e-12, **kwargs):
        """Constructs AlbertConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `AlbertModel`.
            embedding_size: Size of the vocabulary embeddings.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_hidden_groups: Number of groups for the hidden layers; parameters in
                the same group are shared.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            inner_group_num: int, number of inner repetitions of attention and ffn.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `AlbertModel`.
            initializer_range: The stdev of the truncated_normal_initializer for
                initializing all weight matrices.
        """
        super(AlbertConfig, self).__init__(**kwargs)

        self.vocab_size = vocab_size_or_config_json_file
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
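
For reference, a minimal usage sketch of the configuration class added above (not part of the commit; it assumes the package is importable as `transformers` with this module on the path, and that `to_json_string` is provided by the upstream `PretrainedConfig` base class):

    from transformers.configuration_albert import AlbertConfig

    # Defaults correspond to albert_xxlarge; individual fields can be overridden as keyword args.
    config = AlbertConfig(num_hidden_layers=6, hidden_dropout_prob=0.1)

    print(config.hidden_size)        # 4096 (default)
    print(config.num_hidden_layers)  # 6 (overridden)
    print(config.to_json_string())   # serialization inherited from PretrainedConfig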
Lines changed: 71 additions & 0 deletions
@@ -0,0 +1,71 @@
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert ALBERT checkpoint."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import logging

import torch

from transformers.configuration_albert import AlbertConfig
from transformers.modeling_albert import AlbertForMaskedLM, \
    load_tf_weights_in_albert

logging.basicConfig(level=logging.INFO)


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file,
                                     pytorch_dump_path):
    # Initialise the PyTorch model from the JSON configuration
    config = AlbertConfig.from_json_file(albert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = AlbertForMaskedLM(config)

    # Load weights from the TensorFlow checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save the PyTorch model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    ## Required parameters
    parser.add_argument("--tf_checkpoint_path",
                        default=None,
                        type=str,
                        required=True,
                        help="Path to the TensorFlow checkpoint.")
    parser.add_argument("--albert_config_file",
                        default=None,
                        type=str,
                        required=True,
                        help="The config json file corresponding to the pre-trained ALBERT model. \n"
                             "This specifies the model architecture.")
    parser.add_argument("--pytorch_dump_path",
                        default=None,
                        type=str,
                        required=True,
                        help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path,
                                     args.albert_config_file,
                                     args.pytorch_dump_path)
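
As a usage note, the converter above can also be driven directly from Python instead of through argparse. A minimal sketch, assuming `convert_tf_checkpoint_to_pytorch` as defined in this file is in scope; the three paths are placeholders for illustration only, not files shipped with the repository:

    # Hypothetical paths -- substitute your own TF checkpoint, ALBERT config, and output file.
    convert_tf_checkpoint_to_pytorch(
        tf_checkpoint_path="/tmp/albert_base/model.ckpt-best",
        albert_config_file="/tmp/albert_base/albert_config.json",
        pytorch_dump_path="/tmp/albert_base/pytorch_model.bin",
    )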
