Fix typos #4994

Merged: 1 commit, Feb 24, 2023
@@ -117,7 +117,7 @@ def __init__(

         if device == "gpu":
             # set GPU configs accordingly
-            # such as intialize the gpu memory, enable tensorrt
+            # such as initialize the gpu memory, enable tensorrt
             config.enable_use_gpu(100, 0)
             precision_map = {
                 "fp16": inference.PrecisionType.Half,
@@ -116,7 +116,7 @@ def __init__(

         if device == "gpu":
             # set GPU configs accordingly
-            # such as intialize the gpu memory, enable tensorrt
+            # such as initialize the gpu memory, enable tensorrt
             config.enable_use_gpu(100, 0)
             precision_map = {
                 "fp16": inference.PrecisionType.Half,
@@ -114,7 +114,7 @@ def __init__(

         if device == "gpu":
             # set GPU configs accordingly
-            # such as intialize the gpu memory, enable tensorrt
+            # such as initialize the gpu memory, enable tensorrt
             config.enable_use_gpu(100, 0)
             precision_map = {
                 "fp16": inference.PrecisionType.Half,
@@ -81,7 +81,7 @@ def __init__(

         if device == "gpu":
             # set GPU configs accordingly
-            # such as intialize the gpu memory, enable tensorrt
+            # such as initialize the gpu memory, enable tensorrt
             config.enable_use_gpu(100, 0)
             precision_map = {
                 "fp16": inference.PrecisionType.Half,
@@ -116,7 +116,7 @@ def __init__(

         if device == "gpu":
             # set GPU configs accordingly
-            # such as intialize the gpu memory, enable tensorrt
+            # such as initialize the gpu memory, enable tensorrt
             config.enable_use_gpu(100, 0)
             precision_map = {
                 "fp16": inference.PrecisionType.Half,
@@ -95,7 +95,7 @@ def __init__(

         if device == "gpu":
             # set GPU configs accordingly
-            # such as intialize the gpu memory, enable tensorrt
+            # such as initialize the gpu memory, enable tensorrt
             config.enable_use_gpu(100, 0)
             precision_map = {
                 "fp16": inference.PrecisionType.Half,
@@ -94,7 +94,7 @@ def __init__(

         if device == "gpu":
             # set GPU configs accordingly
-            # such as intialize the gpu memory, enable tensorrt
+            # such as initialize the gpu memory, enable tensorrt
             config.enable_use_gpu(100, 0)
             precision_map = {
                 "fp16": inference.PrecisionType.Half,
@@ -173,7 +173,7 @@ def create_predictor(self, model_path):

         if self.args.device == "gpu":
             # set GPU configs accordingly
-            # such as intialize the gpu memory, enable tensorrt
+            # such as initialize the gpu memory, enable tensorrt
             config.enable_use_gpu(100, 0)
             precision_map = {
                 "fp16": inference.PrecisionType.Half,
@@ -33,7 +33,7 @@ class TritonPythonModel(object):
     def initialize(self, args):
         """`initialize` is called only once when the model is being loaded.
         Implementing `initialize` function is optional. This function allows
-        the model to intialize any state associated with this model.
+        the model to initialize any state associated with this model.
         Parameters
         ----------
         args : dict
@@ -35,7 +35,7 @@ class TritonPythonModel(object):
     def initialize(self, args):
         """`initialize` is called only once when the model is being loaded.
         Implementing `initialize` function is optional. This function allows
-        the model to intialize any state associated with this model.
+        the model to initialize any state associated with this model.
         Parameters
         ----------
         args : dict
@@ -141,7 +141,7 @@ def __init__(

         if device == "gpu":
             # set GPU configs accordingly
-            # such as intialize the gpu memory, enable tensorrt
+            # such as initialize the gpu memory, enable tensorrt
             config.enable_use_gpu(100, 0)
             precision_map = {
                 "fp16": inference.PrecisionType.Half,
@@ -110,7 +110,7 @@ def __init__(

         if device == "gpu":
             # set GPU configs accordingly
-            # such as intialize the gpu memory, enable tensorrt
+            # such as initialize the gpu memory, enable tensorrt
             config.enable_use_gpu(100, 0)
             precision_map = {
                 "fp16": inference.PrecisionType.Half,
@@ -33,7 +33,7 @@ class TritonPythonModel(object):
     def initialize(self, args):
         """`initialize` is called only once when the model is being loaded.
         Implementing `initialize` function is optional. This function allows
-        the model to intialize any state associated with this model.
+        the model to initialize any state associated with this model.
         Parameters
         ----------
         args : dict
@@ -35,7 +35,7 @@ class TritonPythonModel(object):
     def initialize(self, args):
         """`initialize` is called only once when the model is being loaded.
         Implementing `initialize` function is optional. This function allows
-        the model to intialize any state associated with this model.
+        the model to initialize any state associated with this model.
         Parameters
         ----------
         args : dict
@@ -189,7 +189,7 @@ def get_latest_ann_data(ann_data_dir):

 def valid_checkpoint(step):
     ann_data_file = os.path.join(ann_data_dir, step, "new_ann_data")
-    # succed_flag_file is an empty file that indicates ann data has been generated
+    # succeed_flag_file is an empty file that indicates ann data has been generated
     succeed_flag_file = os.path.join(ann_data_dir, step, "succeed_flag_file")
     return os.path.exists(succeed_flag_file) and os.path.exists(ann_data_file)

@@ -141,7 +141,7 @@ def __init__(

         if device == "gpu":
             # set GPU configs accordingly
-            # such as intialize the gpu memory, enable tensorrt
+            # such as initialize the gpu memory, enable tensorrt
             config.enable_use_gpu(100, 0)
             precision_map = {
                 "fp16": inference.PrecisionType.Half,
@@ -27,7 +27,7 @@
 parser.add_argument("--recall_result_file", type=str,
                     default='', help="The full path of recall result file")
 parser.add_argument("--recall_num", type=int, default=10,
-                    help="Most similair number of doc recalled from corpus per query")
+                    help="Most similar number of doc recalled from corpus per query")


 args = parser.parse_args()
@@ -52,7 +52,7 @@ def forward(

         cosine_sim = paddle.matmul(query_cls_embedding, title_cls_embedding, transpose_y=True)

-        # substract margin from all positive samples cosine_sim()
+        # subtract margin from all positive samples cosine_sim()
         margin_diag = paddle.full(
             shape=[query_cls_embedding.shape[0]], fill_value=self.margin, dtype=paddle.get_default_dtype()
         )
@@ -65,9 +65,9 @@
 parser.add_argument('--device', choices=['cpu', 'gpu'], default="cpu",
                     help="Select which device to train model, defaults to gpu.")
 parser.add_argument('--save_steps', type=int, default=10000,
-                    help="Inteval steps to save checkpoint")
+                    help="Interval steps to save checkpoint")
 parser.add_argument('--log_steps', type=int, default=10,
-                    help="Inteval steps to print log")
+                    help="Interval steps to print log")
 parser.add_argument("--train_set_file", type=str,
                     default='./data/train.txt',
                     help="The full path of train_set_file.")
@@ -110,7 +110,7 @@ def __init__(

         if device == "gpu":
             # set GPU configs accordingly
-            # such as intialize the gpu memory, enable tensorrt
+            # such as initialize the gpu memory, enable tensorrt
             config.enable_use_gpu(100, 0)
             precision_map = {
                 "fp16": inference.PrecisionType.Half,
@@ -33,7 +33,7 @@ class TritonPythonModel(object):
     def initialize(self, args):
         """`initialize` is called only once when the model is being loaded.
         Implementing `initialize` function is optional. This function allows
-        the model to intialize any state associated with this model.
+        the model to initialize any state associated with this model.
         Parameters
         ----------
         args : dict
@@ -35,7 +35,7 @@ class TritonPythonModel(object):
     def initialize(self, args):
         """`initialize` is called only once when the model is being loaded.
         Implementing `initialize` function is optional. This function allows
-        the model to intialize any state associated with this model.
+        the model to initialize any state associated with this model.
         Parameters
         ----------
         args : dict
@@ -141,7 +141,7 @@ def __init__(

         if device == "gpu":
             # set GPU configs accordingly
-            # such as intialize the gpu memory, enable tensorrt
+            # such as initialize the gpu memory, enable tensorrt
             config.enable_use_gpu(100, 0)
             precision_map = {
                 "fp16": inference.PrecisionType.Half,
@@ -111,7 +111,7 @@ def __init__(

         if device == "gpu":
             # set GPU configs accordingly
-            # such as intialize the gpu memory, enable tensorrt
+            # such as initialize the gpu memory, enable tensorrt
             config.enable_use_gpu(100, 0)
             precision_map = {
                 "fp16": inference.PrecisionType.Half,
@@ -158,7 +158,7 @@ def __init__(
         config = paddle.inference.Config(model_file, param_file)
         if device == "gpu":
             # set GPU configs accordingly
-            # such as intialize the gpu memory, enable tensorrt
+            # such as initialize the gpu memory, enable tensorrt
             config.enable_use_gpu(100, 0)
             precision_map = {
                 "fp16": inference.PrecisionType.Half,
@@ -149,7 +149,7 @@ def __init__(
         config = paddle.inference.Config(model_file, param_file)
         if device == "gpu":
             # set GPU configs accordingly
-            # such as intialize the gpu memory, enable tensorrt
+            # such as initialize the gpu memory, enable tensorrt
             config.enable_use_gpu(100, 0)
             precision_map = {
                 "fp16": inference.PrecisionType.Half,
@@ -149,7 +149,7 @@ def __init__(
         config = paddle.inference.Config(model_file, param_file)
         if device == "gpu":
             # set GPU configs accordingly
-            # such as intialize the gpu memory, enable tensorrt
+            # such as initialize the gpu memory, enable tensorrt
             config.enable_use_gpu(100, 0)
             precision_map = {
                 "fp16": inference.PrecisionType.Half,
@@ -97,7 +97,7 @@ def __init__(

         if device == "gpu":
             # set GPU configs accordingly
-            # such as intialize the gpu memory, enable tensorrt
+            # such as initialize the gpu memory, enable tensorrt
             config.enable_use_gpu(100, 0)
             precision_map = {
                 "fp16": inference.PrecisionType.Half,
@@ -72,7 +72,7 @@ def __init__(

         if device == "gpu":
             # set GPU configs accordingly
-            # such as intialize the gpu memory, enable tensorrt
+            # such as initialize the gpu memory, enable tensorrt
             config.enable_use_gpu(100, 0)
             precision_map = {
                 "fp16": inference.PrecisionType.Half,
@@ -31,7 +31,7 @@ class TritonPythonModel:
     def initialize(self, args):
         """`initialize` is called only once when the model is being loaded.
         Implementing `initialize` function is optional. This function allows
-        the model to intialize any state associated with this model.
+        the model to initialize any state associated with this model.
         Parameters
         ----------
         args : dict
@@ -33,7 +33,7 @@ class TritonPythonModel:
     def initialize(self, args):
         """`initialize` is called only once when the model is being loaded.
         Implementing `initialize` function is optional. This function allows
-        the model to intialize any state associated with this model.
+        the model to initialize any state associated with this model.
         Parameters
         ----------
         args : dict
@@ -31,7 +31,7 @@ class TritonPythonModel:
     def initialize(self, args):
         """`initialize` is called only once when the model is being loaded.
         Implementing `initialize` function is optional. This function allows
-        the model to intialize any state associated with this model.
+        the model to initialize any state associated with this model.
         Parameters
         ----------
         args : dict
@@ -33,7 +33,7 @@ class TritonPythonModel:
     def initialize(self, args):
         """`initialize` is called only once when the model is being loaded.
         Implementing `initialize` function is optional. This function allows
-        the model to intialize any state associated with this model.
+        the model to initialize any state associated with this model.
         Parameters
         ----------
         args : dict
paddlenlp/data/sampler.py (2 changes: 1 addition & 1 deletion)
@@ -345,7 +345,7 @@ def shard(self, num_replicas=None, rank=None):
                 Default: None.
             rank (int, optional): The id of current training process. Equal
                 to the value of the environment variable PADDLE_TRAINER_ID. If
-                None, it will be intialized by :meth:`paddle.distributed.get_rank`
+                None, it will be initialized by :meth:`paddle.distributed.get_rank`
                 method. Default: None.

     Returns:
tests/test_tipc/ernie_information_extraction/predict.py (2 changes: 1 addition & 1 deletion)
@@ -138,7 +138,7 @@ def __init__(

         if device == "gpu":
             # set GPU configs accordingly
-            # such as intialize the gpu memory, enable tensorrt
+            # such as initialize the gpu memory, enable tensorrt
             config.enable_use_gpu(100, 0)
             precision_map = {
                 "fp16": inference.PrecisionType.Half,
tests/test_tipc/ernie_text_cls/predict.py (2 changes: 1 addition & 1 deletion)
@@ -105,7 +105,7 @@ def __init__(

         if device == "gpu":
             # set GPU configs accordingly
-            # such as intialize the gpu memory, enable tensorrt
+            # such as initialize the gpu memory, enable tensorrt
             config.enable_use_gpu(100, 0)
             precision_map = {
                 "fp16": inference.PrecisionType.Half,
tests/test_tipc/ernie_text_matching/predict.py (2 changes: 1 addition & 1 deletion)
@@ -69,7 +69,7 @@ def __init__(

         if device == "gpu":
             # set GPU configs accordingly
-            # such as intialize the gpu memory, enable tensorrt
+            # such as initialize the gpu memory, enable tensorrt
             config.enable_use_gpu(100, 0)
             precision_map = {
                 "fp16": inference.PrecisionType.Half,