1 change: 0 additions & 1 deletion .cmake-format.py
@@ -16,7 +16,6 @@
 # Options affecting formatting.
 # -----------------------------
 with section("format"):
-
     # How wide to allow formatted cmake files
     line_width = 80
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -53,7 +53,7 @@ repos:
         )$
     # For Python files
     - repo: https://github.com/psf/black.git
-      rev: 22.8.0
+      rev: 23.3.0
       hooks:
       - id: black
         files: (.*\.(py|pyi|bzl)|BUILD|.*\.BUILD|WORKSPACE)$
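The rest of this diff is mechanical fallout from that one-line version bump: judging by the hunks below, black 23.x's 2023 stable style removes blank lines at the start of a block and drops redundant parentheses around for-loop targets and single context managers. A minimal sketch of the three recurring rewrite patterns, assuming black 23.3.0 defaults; the function and variable names here are hypothetical, not from the PR:

    # Formatted the way black 23.3.0 leaves it (2023 stable style assumed).
    def load_counts(path):
        # black 22.x tolerated a blank line above this comment, at the start
        # of the function body; black 23.x deletes it.
        counts = {}
        # `with (open(path)) as f:` loses its redundant parentheses:
        with open(path) as f:
            # `for (key, value) in ...:` loses the parens around the target:
            for key, value in (line.split() for line in f):
                counts[key] = int(value)
        return counts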
4 changes: 0 additions & 4 deletions paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
@@ -547,7 +547,6 @@ def GenerateCoreOpInfoDeclaration():
 
 
 def GenerateCoreOpInfoDefinition():
-
     op_args_info_list = []
     for op_name, arg_list in core_ops_args_info.items():
         arg_str = ",".join(["\"" + v + "\"" for v in arg_list])
@@ -803,7 +802,6 @@ def CollectBackwardInfo(self):
         self.backward_returns_list = backward_returns_list_new
 
     def CollectForwardInfoFromBackwardContents(self):
-
         backward_forward_str = self.backward_forward_str
 
         (
@@ -1910,7 +1908,6 @@ def GenerateHigherOrderNodeCreationCode(self):
             self.grad_api_contents["backward_op"] in prim_white_list
             or is_invoke_forward_api
         ):
-
             next_grad_node_creation_str = f"""
  if (!paddle::prim::PrimCommonUtils::IsEagerPrimEnabled()) {{
    if(trace_backward) {{
@@ -2274,7 +2271,6 @@ def GenerateNodeDefinition(
   egr::EagerUtils::HandleViewBetweenInputAndOutput({inplace_grad_input_str}, api_output_{out_index});
 }}"""
             if IsPlainTensorType(ttype):
-
                 if (
                     backward_inplace_map
                     and name in backward_inplace_map.values()
(file header truncated in the capture)
@@ -604,7 +604,6 @@ def GenerateCoreOpsInfoMap():
 
 
 def GeneratePythonCWrappers(python_c_function_str, python_c_function_reg_str):
-
     (
         core_ops_infos_definition,
         core_ops_infos_registry,
2 changes: 1 addition & 1 deletion paddle/fluid/operators/generator/generate_op.py
@@ -500,7 +500,7 @@ def parse_get_expected_kerneltype(
     fw_name = op_comp_map['op'].split('(')[0].strip()
     # deal the last underline of function name in op_comp_map['get_expected_kernel_type']
     new_get_expected_kernel_type_func_map = {}
-    for (key, value) in op_comp_map['get_expected_kernel_type'].items():
+    for key, value in op_comp_map['get_expected_kernel_type'].items():
         new_get_expected_kernel_type_func_map[
             delete_last_underline(key)
         ] = value
6 changes: 3 additions & 3 deletions paddle/fluid/operators/generator/parse_utils.py
@@ -615,15 +615,15 @@ def cross_validate(ops):
         assert len(fw_call["inputs"]) <= len(
             fw_op["inputs"]
         ), f"{name}: forward call has more inputs than the op "
-        for (input, input_) in zip(fw_call["inputs"], fw_op["inputs"]):
+        for input, input_ in zip(fw_call["inputs"], fw_op["inputs"]):
             assert (
                 input["typename"] == input_["typename"]
             ), f"type mismatch in {name} and {fw_name}"
 
         assert len(fw_call["attrs"]) <= len(
             fw_op["attrs"]
         ), f"{name}: forward call has more attrs than the op "
-        for (attr, attr_) in zip(fw_call["attrs"], fw_op["attrs"]):
+        for attr, attr_ in zip(fw_call["attrs"], fw_op["attrs"]):
             if attr["typename"] == "Scalar":
                 # special case for Scalar, fw_call can omit the type
                 assert re.match(
@@ -637,7 +637,7 @@ def cross_validate(ops):
         assert len(fw_call["outputs"]) == len(
             fw_op["outputs"]
         ), f"{name}: forward call has more outputs than the op "
-        for (output, output_) in zip(
+        for output, output_ in zip(
             fw_call["outputs"], fw_op["outputs"]
         ):
             assert (
1 change: 0 additions & 1 deletion paddle/phi/api/yaml/generator/backward_api_gen.py
@@ -316,7 +316,6 @@ def generate_backward_api(
     header_file_path,
     source_file_path,
 ):
-
     bw_apis = []
     for each_api_yaml in backward_yaml_path:
         with open(each_api_yaml, 'r') as f:
1 change: 0 additions & 1 deletion paddle/phi/api/yaml/generator/intermediate_api_gen.py
@@ -92,7 +92,6 @@ def generate_intermediate_api(
     dygraph_header_file_path,
     dygraph_source_file_path,
 ):
-
     dygraph_header_file = open(dygraph_header_file_path, 'w')
     dygraph_source_file = open(dygraph_source_file_path, 'w')
1 change: 0 additions & 1 deletion paddle/phi/api/yaml/generator/sparse_api_gen.py
@@ -351,7 +351,6 @@ def api_namespace():
 
 
 def generate_api(api_yaml_path, header_file_path, source_file_path):
-
     with open(api_yaml_path, 'r') as f:
         apis = yaml.load(f, Loader=yaml.FullLoader)
     header_file = open(header_file_path, 'w')
1 change: 0 additions & 1 deletion paddle/phi/api/yaml/generator/sparse_bw_api_gen.py
@@ -158,7 +158,6 @@ def api_namespace():
 
 
 def generate_api(api_yaml_path, header_file_path, source_file_path):
-
     with open(api_yaml_path, 'r') as f:
         apis = yaml.load(f, Loader=yaml.FullLoader)
     header_file = open(header_file_path, 'w')
1 change: 0 additions & 1 deletion paddle/phi/api/yaml/generator/strings_api_gen.py
@@ -362,7 +362,6 @@ def api_namespace():
 
 
 def generate_api(api_yaml_path, header_file_path, source_file_path):
-
     with open(api_yaml_path, 'r') as f:
         apis = yaml.load(f, Loader=yaml.FullLoader)
     header_file = open(header_file_path, 'w')
(file header truncated in the capture)
@@ -68,7 +68,6 @@ def CreateGatherGemmScatterOperator(
     for tile_description in tile_descriptions:
         for alignment in alignment_constraints:
             for complex_transform in complex_transforms:
-
                 alignment_c = min(8, alignment)
 
                 A = TensorDescription(
@@ -98,7 +97,6 @@
 
 
 def GenerateSM80_TensorOp_16816(manifest, cuda_version, debug=False):
-
     if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
         return
 
@@ -211,7 +209,6 @@ def GenerateSM80_TensorOp_16816(manifest, cuda_version, debug=False):
 
     # Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
     if math_inst.element_a != math_inst.element_accumulator:
-
         data_type_mixed = [
             math_inst.element_a,
             math_inst.element_b,
@@ -225,7 +222,6 @@
 
 
 def GenerateSM80_TensorOp_1688(manifest, cuda_version, debug=False):
-
     if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
         return
 
@@ -341,7 +337,6 @@ def GenerateSM80_TensorOp_1688(manifest, cuda_version, debug=False):
 
 
 def GenerateSM80_TensorOp_1688_fast_math(manifest, cuda_version, debug=False):
-
     if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
         return
 
@@ -443,7 +438,6 @@ def GenerateSM80_TensorOp_1688_fast_math(manifest, cuda_version, debug=False):
 def GenerateSM80_TensorOp_1688_fast_fp32_math(
     manifest, cuda_version, debug=False
 ):
-
     if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
         return
 
@@ -525,7 +519,6 @@ def GenerateSM80_TensorOp_1688_fast_fp32_math(
 
 
 def GenerateSM75_TensorOp_1688(manifest, cuda_version, debug=False):
-
     if not CudaToolkitVersionSatisfies(cuda_version, 10, 2):
         return
 
@@ -649,7 +642,6 @@ def __init__(
 
 
 if __name__ == "__main__":
-
     args = KernelCfg(
         architectures='80',
         build_dir=sys.argv[2],
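Each of these generators returns early when the CUDA toolkit is too old for the tensor-op shapes it emits. A toy stand-in for that gate, assuming CudaToolkitVersionSatisfies (whose real definition is not part of this diff) boils down to a version-tuple comparison:

    def toy_cuda_version_satisfies(cuda_version, major, minor):
        # "11.7" -> (11, 7); satisfied when the toolkit is >= (major, minor).
        got = tuple(int(part) for part in str(cuda_version).split(".")[:2])
        return got >= (major, minor)

    assert toy_cuda_version_satisfies("11.7", 11, 0)
    assert not toy_cuda_version_satisfies("10.2", 11, 0)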
(file header truncated in the capture)
@@ -156,7 +156,6 @@ def __exit__(self, exception_type, exception_value, traceback):
 
 class GatherGemmScatterManifest(Manifest):
     def emit(self, target=GeneratorTarget.Library):
-
         operation_emitters = {
             GeneratorTarget.Library: GatherGemmScatterEmitOperationKindLibrary
         }
(file header truncated in the capture)
@@ -89,7 +89,6 @@ def instance_template(self):
         return ""
 
     def emit(self, operation):
-
         threadblock_shape = operation.tile_description.threadblock_shape
         warp_count = operation.tile_description.warp_count
 
@@ -107,7 +106,6 @@ def emit(self, operation):
             and operation.B.layout in transpose_layouts.keys()
             and operation.C.layout in transpose_layouts.keys()
         ):
-
             instance_layout_A = transpose_layouts[operation.A.layout]
             instance_layout_B = transpose_layouts[operation.B.layout]
             instance_layout_C = transpose_layouts[operation.C.layout]
@@ -124,7 +122,6 @@
 
         # Support built-in epilogue functors or user-defined functions
         if isinstance(operation.epilogue_functor, enum.Enum):
-
             epilogue_vector_length = (
                 min(
                     operation.C.alignment * DataTypeSize[operation.C.element],
@@ -256,7 +253,6 @@ def __enter__(self):
         return self
 
     def __exit__(self, exception_type, exception_value, traceback):
-
         # Write instance definitions in top-level namespace
         for instance_definition in self.instance_definitions:
             self.configuration_file.write(instance_definition)
@@ -278,7 +274,6 @@ def __init__(
         epilogue_functor=EpilogueFunctor.LinearCombination,
         swizzling_functor=SwizzlingFunctor.Identity8,
     ):
-
         super().__init__(
             gemm_kind,
             arch,
1 change: 0 additions & 1 deletion python/paddle/amp/accuracy_compare.py
@@ -458,7 +458,6 @@ def _write_titles(self, worksheet, loss_scale, row):
     def add_worksheet(
         self, mp_tensor_info_list, sheetname, loss_scale, skip_normal_tensors
     ):
-
         assert self.workbook is not None
 
         worksheet = self.workbook.add_worksheet(sheetname)
1 change: 0 additions & 1 deletion python/paddle/amp/debugging.py
@@ -137,7 +137,6 @@ def __init__(
         debug_step=None,
         stack_height_limit=1,
     ):
-
         self.enable = enable
         self.debug_mode = debug_mode
         self.output_dir = output_dir
1 change: 0 additions & 1 deletion python/paddle/amp/grad_scaler.py
@@ -98,7 +98,6 @@ def __init__(
         decr_every_n_nan_or_inf=1,
         use_dynamic_loss_scaling=True,
     ):
-
         tracer = _dygraph_tracer()
         if not tracer:
             raise ValueError(
1 change: 0 additions & 1 deletion python/paddle/cost_model/cost_model.py
@@ -52,7 +52,6 @@ def profile_measure(
     device='gpu',
     fetch_cost_list=['time'],
 ):
-
     place = paddle.set_device('gpu')
     x = np.random.random(size=(10, 1)).astype('float32')
     exe = paddle.static.Executor(place)
1 change: 0 additions & 1 deletion python/paddle/dataset/conll05.py
@@ -151,7 +151,6 @@ def reader_creator(
 ):
     def reader():
         for sentence, predicate, labels in corpus_reader():
-
             sen_len = len(sentence)
 
             verb_index = labels.index('B-V')
1 change: 0 additions & 1 deletion python/paddle/dataset/voc2012.py
@@ -42,7 +42,6 @@
 
 
 def reader_creator(filename, sub_name):
-
     tarobject = tarfile.open(filename)
     name2mem = {}
     for ele in tarobject.getmembers():
2 changes: 0 additions & 2 deletions python/paddle/distributed/auto_parallel/random.py
@@ -69,7 +69,6 @@ def parallel_manual_seed(seed):
 
 
 def determinate_rng(rank, dims_mapping, process_mesh):
-
     # TODO(JZ-LIANG) Support Mesh with any high rank
     # use a string to unique integer hashing algorithm for seed computation.
     # instead of using offsets to coodinate seed across devices.
@@ -119,7 +118,6 @@
 
 
 def init_auto_parallel_rng():
-
     if not is_enable_auto_rand_ctrl():
         return
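The TODO comment in determinate_rng describes the intended scheme: derive each device's seed by hashing a string that uniquely encodes the rank and its place in the parallel topology, rather than coordinating integer offsets across devices. A toy illustration of that idea, not Paddle's actual implementation (all names here are hypothetical):

    import hashlib

    def toy_determinate_seed(base_seed, rank, dims_mapping, mesh_shape):
        # Encode everything that should differentiate this RNG stream...
        key = f"{base_seed}/{rank}/{dims_mapping}/{mesh_shape}"
        # ...then hash the string to a bounded integer seed.
        return int(hashlib.sha256(key.encode()).hexdigest(), 16) % (2**31)

    # Different ranks on the same mesh get distinct, reproducible seeds:
    print(toy_determinate_seed(1024, 0, [0, -1], [2, 4]))
    print(toy_determinate_seed(1024, 1, [0, -1], [2, 4]))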
(file header truncated in the capture)
@@ -319,7 +319,7 @@ def load(save_dir):
         assert os.path.isfile(filepath)
         if "vars" in filename:
             assert filename.endswith("pkl")
-            with (open(filepath, "rb")) as f:
+            with open(filepath, "rb") as f:
                 vars_list.append(pickle.load(f))
         elif "program" in filename:
             assert filename.endswith("pdmodel")
@@ -328,7 +328,7 @@
             program_list.append(deserialize_program(program_string))
         elif "dist_attr" in filename:
             assert filename.endswith("pkl")
-            with (open(filepath, "rb")) as f:
+            with open(filepath, "rb") as f:
                 dist_attr_list.append(pickle.load(f))
 
     dist_attr_map = {}
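The two hunks above are another black 23 rewrite: parentheses around a single context manager are redundant, so `with (open(filepath, "rb")) as f:` and `with open(filepath, "rb") as f:` parse identically. Parentheses only become load-bearing when grouping several managers (Python 3.10+ grammar). A self-contained sketch with hypothetical file names:

    import pickle

    # Write two throwaway pickle files so the example runs on its own.
    for i in range(2):
        with open(f"vars_{i}.pkl", "wb") as f:
            pickle.dump({"step": i}, f)

    # Equivalent to `with (open("vars_0.pkl", "rb")) as f:` before the reformat.
    with open("vars_0.pkl", "rb") as f:
        first = pickle.load(f)

    # Parens matter only for multi-manager grouping:
    with (
        open("vars_0.pkl", "rb") as f0,
        open("vars_1.pkl", "rb") as f1,
    ):
        merged = [pickle.load(f0), pickle.load(f1)]

    print(first, merged)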
1 change: 0 additions & 1 deletion python/paddle/distributed/auto_parallel/static/cluster.py
@@ -147,7 +147,6 @@ def __repr__(self):
 
 
 class Link:
-
     default_hop = 1
     default_nic_bandwidth = 24
4 changes: 0 additions & 4 deletions python/paddle/distributed/auto_parallel/static/completion.py
@@ -1257,7 +1257,6 @@ def _get_op_by_id(ops, id):
 
             # grad ops that have not a corresponding mapping in grad_op_id_to_op_id
             else:
-
                 if grad_op.type == 'sum':
                     assert all(map(_is_grad_var_name, grad_op.input_arg_names))
                     output_name = grad_op.output_arg_names[0]
@@ -1382,7 +1381,6 @@ def _get_op_by_id(ops, id):
             ]
 
         for idx in range(first_backward_op_idx, len(ops)):
-
             # complete the initial grad loss op
             if idx == first_backward_op_idx:
                 assert ops[idx].type == "fill_constant"
@@ -1656,7 +1654,6 @@ def complete_update_annotation(self, serial_main_program):
         learning_rate_completed = False
 
         for idx in range(len(ops)):
-
             # complete the annotation of the optimizer op.
             # TODO to add attribute for moment var
             op = ops[idx]
@@ -1823,7 +1820,6 @@ def complete_update_annotation(self, serial_main_program):
             )
 
             for input_name in op.desc.input_names():
-
                 if input_name in [
                     'Param',
                     'Grad',
(file header truncated in the capture)
@@ -316,7 +316,6 @@ def _parse_sub_program(self, program, nodes, graph, cost_data, sub_idx):
                 if pred.type == CostNodeType.COMPUTATION and (
                     pred_id in graph[node_id][SUCC]
                 ):
-
                     graph[pred_id][SUCC].remove(node_id)
                     graph[node_id][PRED].remove(pred_id)