Skip to content

Commit

Permalink
[CodeStyle][Typos][I-15] Fix typo infered (part1) (#70978)
Browse files Browse the repository at this point in the history
  • Loading branch information
SigureMo authored Jan 25, 2025
1 parent dfde721 commit 2e6bfc8
Show file tree
Hide file tree
Showing 15 changed files with 460 additions and 455 deletions.
2 changes: 1 addition & 1 deletion paddle/phi/kernels/onednn/reshape_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ static DDim ValidateShape(const std::vector<int64_t>& shape,
in_dims_vec.cend(),
[](int64_t i) { return i > 0; });
// only one dimension can be set to -1, whose size will be automatically
// infered
// inferred
const int64_t unk_dim_val = -1;
const int64_t copy_dim_val = 0;

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,8 +32,8 @@ COMMON_DECLARE_bool(pir_apply_shape_optimization_pass);

constexpr int vlog_level = 3;

// TODO(zhangbopd): Some op results infered by InferSymbolicShape is NOT consist
// with the result infered by InferMeta and should be fixed.
// TODO(zhangbopd): Some op results inferred by InferSymbolicShape are NOT
// consistent with the results inferred by InferMeta and should be fixed.
namespace {
bool NeedCheckInferSymbolicWithInferMeta(const std::string& op_name,
size_t result_idx) {
Expand Down
5 changes: 3 additions & 2 deletions paddle/pir/src/dialect/shape/utils/shape_analysis.cc
Original file line number Diff line number Diff line change
Expand Up @@ -513,7 +513,7 @@ void ShapeConstraintIRAnalysis::InferShapeOrDataForValue(Value val) {
}
};

const auto& VisitNotInferedInputOp =
const auto& VisitNotInferredInputOp =
[&](Operation* op, const std::function<void(Operation*)>& Visit) {
for (auto& operand : GetRealOperandSource(op)) {
if (operand.impl() && !context_.HasShapeOrDataForValue(operand)) {
Expand All @@ -526,7 +526,8 @@ void ShapeConstraintIRAnalysis::InferShapeOrDataForValue(Value val) {
}
};

::common::BfsWalker<Operation*> build_subgraph_walker(VisitNotInferedInputOp);
::common::BfsWalker<Operation*> build_subgraph_walker(
VisitNotInferredInputOp);
build_subgraph_walker(val.defining_op(), [&](Operation* op) {
subgraph_ops.insert(op);
bool has_prev_op = false;
Expand Down
2 changes: 1 addition & 1 deletion python/paddle/base/variable_index.py
Original file line number Diff line number Diff line change
Expand Up @@ -910,7 +910,7 @@ def _getitem_static(x, indices):

def parse_bool_and_broadcast_indices(indices):
# deal with multiple Tensors and translating bool tensor to int tensor.
# In static mode, bool-tensor cannot be broadcasted since its corresponding int tensor's shape cannot be infered.
# In static mode, bool-tensor cannot be broadcasted since its corresponding int tensor's shape cannot be inferred.
for i, indice in enumerate(indices):
if (
indice.dtype == paddle.bool
Expand Down
16 changes: 8 additions & 8 deletions test/cpp/auto_parallel/custom_op_spmd_rule_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -55,25 +55,25 @@ TEST(CustomOp, Ctor) {
std::vector<CustomSpmdInferTensorArg> infer_inputs = {inputs};
std::vector<CustomSpmdInferAttrArg> attrs = {axis};

auto infered_dist_attrs = forward_spmd_func(infer_inputs, attrs);
auto inferred_dist_attrs = forward_spmd_func(infer_inputs, attrs);
// list of tensor => single tensor
EXPECT_EQ(infered_dist_attrs.first.size(), static_cast<size_t>(1));
EXPECT_EQ(infered_dist_attrs.second.size(), static_cast<size_t>(1));
EXPECT_EQ(inferred_dist_attrs.first.size(), static_cast<size_t>(1));
EXPECT_EQ(inferred_dist_attrs.second.size(), static_cast<size_t>(1));
EXPECT_TRUE(
paddle::holds_alternative<std::vector<phi::distributed::TensorDistAttr>>(
infered_dist_attrs.first[0]));
inferred_dist_attrs.first[0]));
EXPECT_TRUE(paddle::holds_alternative<phi::distributed::TensorDistAttr>(
infered_dist_attrs.second[0]));
inferred_dist_attrs.second[0]));
auto& inputs_infer1 =
PADDLE_GET_CONST(std::vector<phi::distributed::TensorDistAttr>,
infered_dist_attrs.first[0]);
inferred_dist_attrs.first[0]);

for (auto e : inputs_infer1) {
check_dim_mapping(e, {-1, 1, 0});
check_partial_dims(e, {});
}
check_dim_mapping(infered_dist_attrs.second[0], {-1, 1, 0});
check_partial_dims(infered_dist_attrs.second[0], {});
check_dim_mapping(inferred_dist_attrs.second[0], {-1, 1, 0});
check_partial_dims(inferred_dist_attrs.second[0], {});
}

TEST(CustomOp, Register) {
Expand Down
50 changes: 25 additions & 25 deletions test/cpp/auto_parallel/fused_rms_norm_spmd_rule_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -44,16 +44,16 @@ TEST(FusedRmsNormSPMDRule, test_fused_rms_norm) {
phi::distributed::DistMetaTensor x(common::make_ddim(x_shape), x_dist_attr);
phi::distributed::DistMetaTensor scale(common::make_ddim(scale_shape),
scale_dist_attr);
auto infered_dist_attrs = phi::distributed::RmsNormInferSpmd(x, scale, 0.5);
auto inferred_dist_attrs = phi::distributed::RmsNormInferSpmd(x, scale, 0.5);

size_t input_size = 2;
size_t output_size = 2;
EXPECT_EQ(infered_dist_attrs.first.size(), input_size);
EXPECT_EQ(infered_dist_attrs.second.size(), output_size);
check_dim_mapping(infered_dist_attrs.first[0], {1, -1, -1});
check_dim_mapping(infered_dist_attrs.first[1], {-1});
check_dim_mapping(infered_dist_attrs.second[0], {1, -1, -1});
check_dim_mapping(infered_dist_attrs.second[1], {1, -1});
EXPECT_EQ(inferred_dist_attrs.first.size(), input_size);
EXPECT_EQ(inferred_dist_attrs.second.size(), output_size);
check_dim_mapping(inferred_dist_attrs.first[0], {1, -1, -1});
check_dim_mapping(inferred_dist_attrs.first[1], {-1});
check_dim_mapping(inferred_dist_attrs.second[0], {1, -1, -1});
check_dim_mapping(inferred_dist_attrs.second[1], {1, -1});

VLOG(4) << "test1 done.";

Expand All @@ -63,11 +63,11 @@ TEST(FusedRmsNormSPMDRule, test_fused_rms_norm) {
scale = phi::distributed::DistMetaTensor(common::make_ddim(scale_shape),
scale_dist_attr);

infered_dist_attrs = phi::distributed::RmsNormInferSpmd(x, scale, 0.5);
check_dim_mapping(infered_dist_attrs.first[0], {1, 0, -1});
check_dim_mapping(infered_dist_attrs.first[1], {-1});
check_dim_mapping(infered_dist_attrs.second[0], {1, 0, -1});
check_dim_mapping(infered_dist_attrs.second[1], {1, 0});
inferred_dist_attrs = phi::distributed::RmsNormInferSpmd(x, scale, 0.5);
check_dim_mapping(inferred_dist_attrs.first[0], {1, 0, -1});
check_dim_mapping(inferred_dist_attrs.first[1], {-1});
check_dim_mapping(inferred_dist_attrs.second[0], {1, 0, -1});
check_dim_mapping(inferred_dist_attrs.second[1], {1, 0});
VLOG(4) << "test2 done.";

TensorDistAttr out_dist_attr = TensorDistAttr();
Expand All @@ -84,26 +84,26 @@ TEST(FusedRmsNormSPMDRule, test_fused_rms_norm) {
phi::distributed::DistMetaTensor invvar(common::make_ddim(variance_shape),
invvar_dist_attr);

infered_dist_attrs =
inferred_dist_attrs =
phi::distributed::RmsNormInferSpmdReverse(x, scale, out, invvar, 0.5);
check_dim_mapping(infered_dist_attrs.first[0], {0, 1, -1});
check_dim_mapping(infered_dist_attrs.first[1], {-1});
check_dim_mapping(infered_dist_attrs.second[0], {0, 1, -1});
check_dim_mapping(infered_dist_attrs.second[1], {0, 1});
check_dim_mapping(inferred_dist_attrs.first[0], {0, 1, -1});
check_dim_mapping(inferred_dist_attrs.first[1], {-1});
check_dim_mapping(inferred_dist_attrs.second[0], {0, 1, -1});
check_dim_mapping(inferred_dist_attrs.second[1], {0, 1});
VLOG(4) << "test3 done.";

x_dist_attr.set_dims_mapping({0, 1, -1});
x = phi::distributed::DistMetaTensor(common::make_ddim(x_shape), x_dist_attr);
infered_dist_attrs =
inferred_dist_attrs =
phi::distributed::RmsNormGradInferSpmd(x, scale, invvar, out, 0.5);

check_dim_mapping(infered_dist_attrs.first[0], {0, 1, -1});
check_dim_mapping(infered_dist_attrs.first[1], {-1});
check_dim_mapping(infered_dist_attrs.first[2], {0, 1});
check_dim_mapping(infered_dist_attrs.first[3], {0, 1, -1});
check_dim_mapping(infered_dist_attrs.second[0], {0, 1, -1});
check_dim_mapping(infered_dist_attrs.second[1], {-1});
check_partial_dims(infered_dist_attrs.second[1], {0, 1});
check_dim_mapping(inferred_dist_attrs.first[0], {0, 1, -1});
check_dim_mapping(inferred_dist_attrs.first[1], {-1});
check_dim_mapping(inferred_dist_attrs.first[2], {0, 1});
check_dim_mapping(inferred_dist_attrs.first[3], {0, 1, -1});
check_dim_mapping(inferred_dist_attrs.second[0], {0, 1, -1});
check_dim_mapping(inferred_dist_attrs.second[1], {-1});
check_partial_dims(inferred_dist_attrs.second[1], {0, 1});
}
} // namespace auto_parallel
} // namespace distributed
Expand Down
4 changes: 2 additions & 2 deletions test/cpp/auto_parallel/moe_combine_spmd_rule_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -91,9 +91,9 @@ void test_moe_combine_spmd(
<< dist_attrs.size() << " != " << dims_mappings.size();

for (size_t j = 0; j < dist_attrs.size(); ++j) {
const ArgDistAttr& infered_attr = dist_attrs[j];
const ArgDistAttr& inferred_attr = dist_attrs[j];
const std::vector<int64_t>& expected_dims_mapping = dims_mappings[j];
check_dim_mapping(infered_attr, expected_dims_mapping);
check_dim_mapping(inferred_attr, expected_dims_mapping);
}
}
}
Expand Down
4 changes: 2 additions & 2 deletions test/cpp/auto_parallel/moe_gate_dispatch_spmd_rule_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -96,9 +96,9 @@ void test_moe_gate_dispatch_spmd(
<< dist_attrs.size() << " != " << dims_mappings.size();

for (size_t j = 0; j < dist_attrs.size(); ++j) {
const ArgDistAttr& infered_attr = dist_attrs[j];
const ArgDistAttr& inferred_attr = dist_attrs[j];
const std::vector<int64_t>& expected_dims_mapping = dims_mappings[j];
check_dim_mapping(infered_attr, expected_dims_mapping);
check_dim_mapping(inferred_attr, expected_dims_mapping);
}
}
}
Expand Down
Loading

0 comments on commit 2e6bfc8

Please sign in to comment.