[clang-tidy] [No.4] enable modernize-loop-convert (#55704)
GreatV authored Aug 3, 2023
1 parent 131f576 commit 81ccd99
Showing 183 changed files with 1,345 additions and 1,549 deletions.
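For context: modernize-loop-convert is the clang-tidy check that rewrites index- and iterator-based loops into C++11 range-based for loops; that is the mechanical change repeated across the files below. A minimal before/after sketch (hypothetical code, not taken from this diff):

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  std::vector<int> values{1, 2, 3};
  // Before: the index-based form the check flags.
  for (std::size_t i = 0; i < values.size(); i++) {
    std::cout << values[i] << "\n";
  }
  // After: the range-based form the check produces.
  for (int value : values) {
    std::cout << value << "\n";
  }
  return 0;
}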
2 changes: 1 addition & 1 deletion .clang-tidy
@@ -169,7 +169,7 @@ modernize-avoid-bind,
 -modernize-avoid-c-arrays,
 -modernize-deprecated-headers,
 -modernize-deprecated-ios-base-aliases,
--modernize-loop-convert,
+modernize-loop-convert,
 -modernize-make-shared,
 modernize-make-unique,
 -modernize-pass-by-value,
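In this config, a leading '-' inside the Checks list disables a check, so the one-line change above flips modernize-loop-convert from disabled to enabled. As a sketch of local usage (assuming a compile_commands.json exists under build/; the source path is a placeholder), the check can be applied with:

clang-tidy -p build --checks=-*,modernize-loop-convert --fix path/to/file.cc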
@@ -48,10 +48,10 @@ ReductionSPMDRule::InferForward(const std::vector<DistTensorSpec>& input_specs,
   input_axes_vec.emplace_back(input_axes);

   // get einsum notation for output
-  for (int64_t i = 0, n = reduce_dims.size(); i < n; ++i) {
+  for (auto& reduce_dim : reduce_dims) {
     // convert the negative dim value to normal dim value
-    if (reduce_dims[i] < 0) {
-      reduce_dims[i] = ndim + reduce_dims[i];
+    if (reduce_dim < 0) {
+      reduce_dim = ndim + reduce_dim;
     }
   }
   std::string output_axes = "";
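Note the check's choice of auto& in this hunk: the loop body writes back into reduce_dims, so the element must be taken by reference. A standalone sketch of why the reference matters (hypothetical values, ndim = 4 for illustration):

#include <vector>

int main() {
  std::vector<long> reduce_dims{-1, 2, -3};
  const long ndim = 4;
  for (auto& reduce_dim : reduce_dims) {  // auto&: writes reach the container
    if (reduce_dim < 0) {
      reduce_dim = ndim + reduce_dim;
    }
  }
  // reduce_dims is now {3, 2, 1}; with a by-value `auto reduce_dim`,
  // the container would be left unchanged at {-1, 2, -3}.
  return 0;
}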
6 changes: 3 additions & 3 deletions paddle/fluid/distributed/collective/process_group.cc
@@ -38,10 +38,10 @@ ProcessGroupIdMap& ProcessGroupIdMap::GetInstance() {

 void ProcessGroupIdMap::DestroyProcessGroup() {
   auto& id_map = ProcessGroupIdMap::GetInstance();
-  for (auto iter = id_map.begin(); iter != id_map.end(); ++iter) {
-    auto use_count = iter->second.use_count();
+  for (auto& item : id_map) {
+    auto use_count = item.second.use_count();
     for (int i = 0; i < use_count; ++i) {
-      iter->second.reset();
+      item.second.reset();
     }
   }
   id_map.clear();
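A standalone sketch of the map-iteration pattern in DestroyProcessGroup above (std::map<int, std::shared_ptr<int>> stands in for the real ProcessGroupIdMap type, which this hunk does not show): range-for over a map yields std::pair<const Key, T>&, so item.first is the immutable key and item.second is the mutable mapped value the loop resets.

#include <map>
#include <memory>

int main() {
  std::map<int, std::shared_ptr<int>> id_map;
  id_map[0] = std::make_shared<int>(42);
  id_map[1] = std::make_shared<int>(7);
  for (auto& item : id_map) {  // item: std::pair<const int, std::shared_ptr<int>>&
    item.second.reset();       // mutates the mapped value in place
  }
  id_map.clear();
  return 0;
}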
4 changes: 2 additions & 2 deletions paddle/fluid/distributed/collective/process_group_gloo.cc
@@ -105,8 +105,8 @@ std::vector<T*> get_multi_data(
     std::vector<phi::DenseTensor>& tensors) {  // NOLINT
   std::vector<T*> ret;
   ret.reserve(tensors.size());
-  for (size_t i = 0; i < tensors.size(); i++) {
-    ret.push_back(get_data<T>(tensors[i]));
+  for (auto& tensor : tensors) {
+    ret.push_back(get_data<T>(tensor));
   }
   return ret;
 }
4 changes: 2 additions & 2 deletions paddle/fluid/distributed/collective/reducer.cc
@@ -651,8 +651,8 @@ void EagerReducer::TraverseBackwardGraph(const std::vector<Tensor> &outputs) {
               egr::kSlotSmallVectorSize> &metas =
       node->OutputMeta();
   for (size_t i = 0; i < metas.size(); i++) {
-    for (size_t j = 0; j < metas[i].size(); j++) {
-      const egr::Edge &edge = metas[i][j].GetEdge();
+    for (const auto &item : metas[i]) {
+      const egr::Edge &edge = item.GetEdge();
       auto next_node_shared = edge.GetMutableGradNode();
       if (!next_node_shared || !next_node_shared.get()) {
         continue;
4 changes: 2 additions & 2 deletions paddle/fluid/distributed/fleet_executor/task_node.cc
@@ -144,8 +144,8 @@ bool TaskNode::AddDownstreamTask(int64_t task_id,
 std::string TaskNode::DebugString() const {
   std::ostringstream os;
   os << "role: " << role_ << ", task_id: " << task_id_ << "\n";
-  for (std::size_t i = 0; i < ops_.size(); ++i) {
-    os << ops_[i]->Type() << " ";
+  for (auto op : ops_) {
+    os << op->Type() << " ";
   }
   os << "\n";
   return os.str();
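Here the check deliberately takes the element by value: `for (auto op : ops_)` copies what the hunk suggests is a raw pointer, which is as cheap as copying an index; for containers of heavier objects the check prefers `const auto&` to avoid copies. A sketch of the distinction (hypothetical types, not the real ops_ element type):

#include <iostream>
#include <string>
#include <vector>

int main() {
  std::string a = "matmul", b = "relu";
  std::vector<std::string*> ops{&a, &b};
  for (auto op : ops) {  // copies the pointer, not the pointed-to string
    std::cout << *op << " ";
  }
  std::cout << "\n";
  return 0;
}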
12 changes: 6 additions & 6 deletions paddle/fluid/distributed/index_dataset/index_wrapper.cc
@@ -97,9 +97,9 @@ int TreeIndex::Load(const std::string filename) {
 std::vector<IndexNode> TreeIndex::GetNodes(const std::vector<uint64_t>& codes) {
   std::vector<IndexNode> nodes;
   nodes.reserve(codes.size());
-  for (size_t i = 0; i < codes.size(); i++) {
-    if (CheckIsValid(codes[i])) {
-      nodes.push_back(data_.at(codes[i]));
+  for (auto code : codes) {
+    if (CheckIsValid(code)) {
+      nodes.push_back(data_.at(code));
     } else {
       nodes.push_back(fake_node_);
     }
@@ -128,11 +128,11 @@ std::vector<uint64_t> TreeIndex::GetAncestorCodes(
   res.reserve(ids.size());

   int cur_level;
-  for (size_t i = 0; i < ids.size(); i++) {
-    if (id_codes_map_.find(ids[i]) == id_codes_map_.end()) {
+  for (auto id : ids) {
+    if (id_codes_map_.find(id) == id_codes_map_.end()) {
       res.push_back(max_code_);
     } else {
-      auto code = id_codes_map_.at(ids[i]);
+      auto code = id_codes_map_.at(id);
       cur_level = meta_.height() - 1;

       while (level >= 0 && cur_level > level) {
32 changes: 15 additions & 17 deletions paddle/fluid/distributed/ps/service/brpc_ps_client.cc
@@ -601,8 +601,8 @@ std::future<int32_t> BrpcPsClient::CacheShuffleMultiTable(
   param.push_back(path);
   param.push_back(mode);
   param.push_back(cache_threshold);
-  for (size_t i = 0; i < tables.size(); i++) {
-    param.push_back(std::to_string(tables[i]));
+  for (auto table : tables) {
+    param.push_back(std::to_string(table));
   }
   return SendSaveCmd(0, PS_CACHE_SHUFFLE, param);
 }
@@ -1181,15 +1181,14 @@ std::future<int32_t> BrpcPsClient::PullSparse(float **select_values,
   uint64_t last_key = UINT64_MAX;
   float *last_value_data = NULL;

-  for (size_t kv_idx = 0; kv_idx < request_kvs.size(); ++kv_idx) {
-    auto *kv_pair = &(request_kvs[kv_idx]);
-    if (kv_pair->first == last_key) {
-      memcpy(reinterpret_cast<void *>(kv_pair->second),
+  for (auto &kv_pair : request_kvs) {
+    if (kv_pair.first == last_key) {
+      memcpy(reinterpret_cast<void *>(kv_pair.second),
              reinterpret_cast<void *>(last_value_data),
              value_size);
     } else {
-      last_key = kv_pair->first;
-      last_value_data = kv_pair->second;
+      last_key = kv_pair.first;
+      last_value_data = kv_pair.second;
       if (value_size !=
           io_buffer_itr.copy_and_forward(
               reinterpret_cast<void *>(last_value_data), value_size)) {
@@ -1296,15 +1295,14 @@ std::future<int32_t> BrpcPsClient::PullSparseParam(float **select_values,
   float *last_value_data = NULL;

   // can remove sort&unique
-  for (size_t kv_idx = 0; kv_idx < request_kvs.size(); ++kv_idx) {
-    auto *kv_pair = &(request_kvs[kv_idx]);
-    if (kv_pair->first == last_key) {
-      memcpy(reinterpret_cast<void *>(kv_pair->second),
+  for (auto &kv_pair : request_kvs) {
+    if (kv_pair.first == last_key) {
+      memcpy(reinterpret_cast<void *>(kv_pair.second),
              reinterpret_cast<void *>(last_value_data),
              value_size);
     } else {
-      last_key = kv_pair->first;
-      last_value_data = kv_pair->second;
+      last_key = kv_pair.first;
+      last_value_data = kv_pair.second;
       if (value_size !=
           io_buffer_itr.copy_and_forward(
               reinterpret_cast<void *>(last_value_data), value_size)) {
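Both PullSparse hunks swap a pointer alias into the vector (members reached with ->) for a reference bound by the range-for (members reached with '.'); both name the same element, only the access syntax changes. A minimal sketch, assuming the elements are std::pair<uint64_t, float *> (the diff does not show request_kvs's actual type):

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

int main() {
  float v = 1.0f;
  std::vector<std::pair<std::uint64_t, float *>> request_kvs{{7, &v}};
  // Old style: a pointer alias into the vector.
  for (std::size_t i = 0; i < request_kvs.size(); ++i) {
    auto *kv_pair = &(request_kvs[i]);
    (void)kv_pair->first;  // access through ->
  }
  // New style: a reference bound by the range-for.
  for (auto &kv_pair : request_kvs) {
    (void)kv_pair.first;   // same element, access through .
  }
  return 0;
}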
@@ -1613,9 +1611,9 @@ void BrpcPsClient::PushSparseTaskConsume() {
     ++_async_call_num;

     int merge_count = 0;
-    for (size_t i = 0; i < task_list.size(); ++i) {
-      if (task_list[i]->data()) {
-        _sparse_task_pool.push(task_list[i]->data());
+    for (auto &task : task_list) {
+      if (task->data()) {
+        _sparse_task_pool.push(task->data());
       }
     }
     auto sparse_task_data = _sparse_task_pool.get();
23 changes: 11 additions & 12 deletions paddle/fluid/distributed/ps/service/communicator/communicator.cc
@@ -201,9 +201,9 @@ void Communicator::RpcSendDense(const CommContext &ctx,
       request_call_num);  // accessor->update_dim() = 1
   float *data = dense_data->data();
   uint32_t pos = 0;
-  for (size_t i = 0; i < var_names.size(); ++i) {
+  for (const auto &var_name : var_names) {
     const phi::DenseTensor tensor =
-        scope.FindVar(var_names[i])->Get<phi::DenseTensor>();
+        scope.FindVar(var_name)->Get<phi::DenseTensor>();
     size_t count = static_cast<size_t>(tensor.numel());
     const float *g = tensor.data<float>();
     CHECK(pos + count <= dense_data->size())
@@ -602,8 +602,7 @@ void AsyncCommunicator::PullSparseToTensorSync(
   float *output_data = nullptr;
   size_t output_index = -1;
   size_t output_len = 0;
-  for (size_t index = 0; index < inputs->size(); ++index) {
-    const phi::DenseTensor *tensor = inputs->at(index);
+  for (auto tensor : *inputs) {
     const int64_t *ids = tensor->data<int64_t>();
     size_t len = tensor->numel();
     for (size_t i = 0; i < len; ++i, output_len += fea_dim) {
@@ -892,11 +891,11 @@ bool AsyncCommunicator::Check(const int table_id) {
 void AsyncCommunicator::Send(const std::vector<std::string> &var_names,
                              const framework::Scope &scope) {
   waiting_ = false;
-  for (size_t i = 0; i < var_names.size(); i++) {
-    auto *var = scope.FindVar(var_names[i]);
+  for (const auto &var_name : var_names) {
+    auto *var = scope.FindVar(var_name);
     auto tmp_grad_var = std::make_shared<Variable>();
     framework::CopyVariable(*var, tmp_grad_var.get());
-    send_varname_to_queue_[var_names[i]]->Push(tmp_grad_var);
+    send_varname_to_queue_[var_name]->Push(tmp_grad_var);
   }
 }

@@ -1045,10 +1044,10 @@ void GeoCommunicator::Send(
   // insert ids which has not been record
   // VLOG(0) << "fl-ps > table_name: " << table_name << " splited_var_nums: " <<
   // splited_var_nums << " rows size: " << rows.size();
-  for (size_t j = 0; j < rows.size(); j++) {  // batch_size == rows.size()
-    auto ep_idx = rows[j] % splited_var_nums;
+  for (auto row : rows) {  // batch_size == rows.size()
+    auto ep_idx = row % splited_var_nums;
     ids_table.at(send_varname_to_ctx_[table_name].splited_varnames[ep_idx])
-        .insert(rows[j]);
+        .insert(row);
     // VLOG(0) << " id: " << rows[j] << " ";
   }

@@ -1299,8 +1298,8 @@ std::vector<int64_t> GeoCommunicator::MergeSparseIds(
       wait_times = 0;
       std::shared_ptr<std::vector<int64_t>> pop_ids = nullptr;
       sparse_id_queues_.at(send_varname)->Get(pop_ids);
-      for (size_t j = 0; j < pop_ids->size(); j++) {
-        sparse_ids.insert(pop_ids->at(j));
+      for (auto &pop_id : *pop_ids) {
+        sparse_ids.insert(pop_id);
       }
       merge_num += 1;
       VLOG(3) << "sparse_id_queues_(" << send_varname << ") pushed";
10 changes: 5 additions & 5 deletions paddle/fluid/distributed/ps/service/coordinator_client.cc
@@ -115,19 +115,19 @@ int32_t CoordinatorClient::Initialize(
     fl_client_list[i].rank = i;  // TO CHECK
   }
   std::string fl_client_ip_port;
-  for (size_t i = 0; i < fl_client_list.size(); ++i) {
-    fl_client_ip_port.assign(fl_client_list[i].ip);
+  for (auto& fl_client : fl_client_list) {
+    fl_client_ip_port.assign(fl_client.ip);
     fl_client_ip_port.append(":");
-    fl_client_ip_port.append(std::to_string(fl_client_list[i].port));
-    uint32_t rank = fl_client_list[i].rank;
+    fl_client_ip_port.append(std::to_string(fl_client.port));
+    uint32_t rank = fl_client.rank;
     VLOG(0) << "fl-ps > coordinator connect to fl_client: " << rank;
     _fl_client_channels[rank].reset(new brpc::Channel());
     if (_fl_client_channels[rank]->Init(
             fl_client_ip_port.c_str(), "", &options) != 0) {
       LOG(ERROR) << "CoordinatorClient connect to FLClient:"
                  << fl_client_ip_port << " Failed! Try again.";
       std::string int_ip_port =
-          GetIntTypeEndpoint(fl_client_list[i].ip, fl_client_list[i].port);
+          GetIntTypeEndpoint(fl_client.ip, fl_client.port);
       if (_fl_client_channels[rank]->Init(int_ip_port.c_str(), "", &options) !=
           0) {
         LOG(ERROR) << "CoordinatorClient connect to PSClient:" << int_ip_port
24 changes: 10 additions & 14 deletions paddle/fluid/distributed/ps/service/graph_brpc_client.cc
@@ -62,8 +62,8 @@ std::future<int32_t> GraphBrpcClient::get_node_feat(
     std::vector<std::vector<std::string>> &res) {
   std::vector<int> request2server;
   std::vector<int> server2request(server_size, -1);
-  for (size_t query_idx = 0; query_idx < node_ids.size(); ++query_idx) {
-    int server_index = get_server_index_by_id(node_ids[query_idx]);
+  for (auto node_id : node_ids) {
+    int server_index = get_server_index_by_id(node_id);
     if (server2request[server_index] == -1) {
       server2request[server_index] = request2server.size();
       request2server.push_back(server_index);
@@ -102,10 +102,7 @@ std::future<int32_t> GraphBrpcClient::get_node_feat(

           for (size_t feat_idx = 0; feat_idx < feature_names.size();
                ++feat_idx) {
-            for (size_t node_idx = 0;
-                 node_idx < query_idx_buckets.at(request_idx).size();
-                 ++node_idx) {
-              int query_idx = query_idx_buckets.at(request_idx).at(node_idx);
+            for (auto query_idx : query_idx_buckets.at(request_idx)) {
              size_t feat_len = *reinterpret_cast<size_t *>(buffer);
              buffer += sizeof(size_t);
              auto feature = std::string(buffer, feat_len);
@@ -273,15 +270,14 @@ std::future<int32_t> GraphBrpcClient::remove_graph_node(
   std::vector<std::vector<int64_t>> request_bucket;
   std::vector<int> server_index_arr;
   std::vector<int> index_mapping(server_size, -1);
-  for (size_t query_idx = 0; query_idx < node_id_list.size(); ++query_idx) {
-    int server_index = get_server_index_by_id(node_id_list[query_idx]);
+  for (auto &node_id : node_id_list) {
+    int server_index = get_server_index_by_id(node_id);
     if (index_mapping[server_index] == -1) {
       index_mapping[server_index] = request_bucket.size();
       server_index_arr.push_back(server_index);
       request_bucket.push_back(std::vector<int64_t>());
     }
-    request_bucket[index_mapping[server_index]].push_back(
-        node_id_list[query_idx]);
+    request_bucket[index_mapping[server_index]].push_back(node_id);
   }
   size_t request_call_num = request_bucket.size();
   DownpourBrpcClosure *closure = new DownpourBrpcClosure(
@@ -407,8 +403,8 @@ std::future<int32_t> GraphBrpcClient::batch_sample_neighbors(
   std::vector<int> server2request(server_size, -1);
   res.clear();
   res_weight.clear();
-  for (size_t query_idx = 0; query_idx < node_ids.size(); ++query_idx) {
-    int server_index = get_server_index_by_id(node_ids[query_idx]);
+  for (auto node_id : node_ids) {
+    int server_index = get_server_index_by_id(node_id);
     if (server2request[server_index] == -1) {
       server2request[server_index] = request2server.size();
       request2server.push_back(server_index);
@@ -621,8 +617,8 @@ std::future<int32_t> GraphBrpcClient::set_node_feat(
     const std::vector<std::vector<std::string>> &features) {
   std::vector<int> request2server;
   std::vector<int> server2request(server_size, -1);
-  for (size_t query_idx = 0; query_idx < node_ids.size(); ++query_idx) {
-    int server_index = get_server_index_by_id(node_ids[query_idx]);
+  for (auto node_id : node_ids) {
+    int server_index = get_server_index_by_id(node_id);
     if (server2request[server_index] == -1) {
       server2request[server_index] = request2server.size();
       request2server.push_back(server_index);
@@ -85,14 +85,14 @@ void GraphPyService::set_up(std::string ips_str,
   std::vector<std::vector<int32_t>> table_feat_conf_feat_shape;
   */
   id_to_edge = edge_types;
-  for (size_t table_id = 0; table_id < edge_types.size(); table_id++) {
+  for (const auto& edge_type : edge_types) {
     int res = static_cast<int>(edge_to_id.size());
-    edge_to_id[edge_types[table_id]] = res;
+    edge_to_id[edge_type] = res;
   }
   id_to_feature = node_types;
-  for (size_t table_id = 0; table_id < node_types.size(); table_id++) {
+  for (const auto& node_type : node_types) {
     int res = static_cast<int>(feature_to_id.size());
-    feature_to_id[node_types[table_id]] = res;
+    feature_to_id[node_type] = res;
   }
   table_feat_mapping.resize(node_types.size());
   this->table_feat_conf_feat_name.resize(node_types.size());
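A closing note on scope: modernize-loop-convert only rewrites loops whose index or iterator is used for nothing but accessing a single container, which is why some index loops survive this commit unchanged. A hypothetical example the check leaves alone, because the index walks two containers in lockstep:

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> xs{1, 2, 3};
  std::vector<int> ys{4, 5, 6};
  for (std::size_t i = 0; i < xs.size(); ++i) {
    std::printf("%d %d\n", xs[i], ys[i]);  // i indexes both xs and ys
  }
  return 0;
}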