Skip to content

Commit

Permalink
feat([trash collection]): add trash collection
Browse files Browse the repository at this point in the history
  • Loading branch information
HarukiMoriarty committed Jun 17, 2024
1 parent bd9ccf4 commit ca19150
Show file tree
Hide file tree
Showing 7 changed files with 245 additions and 144 deletions.
43 changes: 31 additions & 12 deletions calmapf/include/cache.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ struct Cache {
std::vector<Vertices> node_coming_cargo;
std::vector<std::vector<uint>> node_cargo_num;
std::vector<std::vector<uint>> bit_cache_get_lock;
std::vector<std::vector<uint>> bit_cache_insert_lock;
std::vector<std::vector<uint>> bit_cache_insert_or_clear_lock;
std::vector<std::vector<bool>> is_empty;

// LRU paras
Expand Down Expand Up @@ -72,6 +72,13 @@ struct Cache {
*/
bool _is_cargo_in_coming_cache(Vertex* cargo);

/**
* @brief Check if cache need a garbage collection.
* @param group cache block group number
* @return true if garbage collection is actually needed, false otherwise.
*/
bool _is_garbage_collection(int group);

/**
* @brief Check if the cargo is in cache. Used for look ahead protocol.
* @param cargo A pointer to the Vertex representing the cargo.
Expand All @@ -82,21 +89,25 @@ struct Cache {
/**
* @brief Attempt to find a cached cargo and retrieve associated goals.
* @param cargo A pointer to the Vertex representing the cargo.
* @return A pointer to the Vertex of the cargo in the cache,
* or to the Vertex in the warehouse if not cached.
* @return A CacheAccessResult, true if we find cached cargo, false otherwise.
*/
Vertex* try_cache_cargo(Vertex* cargo);
CacheAccessResult try_cache_cargo(Vertex* cargo);

/**
* @brief Find a cache block for cargo that is not cached (cache-miss) and set goals.
* This is triggered when a cargo cache miss occurs.
* @brief Find an empty cache block for cargo that is not cached (cache-miss) and insert.
* @param cargo A pointer to the Vertex representing the cargo.
* @param port_list A pointer to the vector of Vertex representing the unloading ports.
* @return A pointer to the Vertex representing the cache block, or
* the unloading port Vertex if a suitable block cannot be found or
* if the cargo is already cached (in a multi-agent context).
*/
Vertex* try_insert_cache(Vertex* cargo, std::vector<Vertex*> port_list);
* @param unloading_port A pointer to the unloading port.
* @return A CacheAccessResult, true if we find one, false otherwise.
*/
CacheAccessResult try_insert_cache(Vertex* cargo, Vertex* unloading_port);

/**
* @brief Attempt to find a garbage to free one cache block.
* @param cargo A pointer to the Vertex representing the cargo.
* @return A CacheAccessResult, true if need to do garbage collection
* and actually find one to collect, false otherwise.
*/
CacheAccessResult try_cache_garbage_collection(Vertex* cargo);

/**
* @brief Insert cargo into cache. This occurs when an agent brings a
Expand All @@ -116,4 +127,12 @@ struct Cache {
*/
bool update_cargo_from_cache(Vertex* cargo, Vertex* cache_node);

/**
* @brief Release lock when clear a cached cargo.
* This occurs when an agent reaches the cached garbage cargo.
* @param cargo A pointer to the Vertex representing the garbage cargo.
* @param cache_node A pointer to the vertex representing the cache goal.
* @return true if successful, false otherwise.
*/
bool clear_cargo_from_cache(Vertex* cargo, Vertex* cache_node);
};
13 changes: 8 additions & 5 deletions calmapf/include/instance.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -11,16 +11,19 @@ struct Instance {
Graph graph; // graph
Config starts; // initial configuration
Config goals; // goal configuration, can be in warehouse block/cache block
Config old_goals; // old goal configuration, used for trash collection
Config cargo_goals; // cargo goal configuration
std::vector<uint> cargo_cnts; // each cargo cnts, help variable for cargo_steps
std::vector<uint> cargo_steps; // each cargo steps

// Status control:
// 0 -> cache miss, going for warehouse get cargo
// 1 -> cache hit, going for cache get cargo
// 2 -> warehouse get cargo, find empty block, going back to insert cache
// 3 -> warehouse get cargo, cannot find empty block, going back to unloading port
// 4 -> cache get cargo, going back to unloading port
// 0 -> cache miss, need trash collection, going to cache to clear position (add clear lock)
// 1 -> cache miss, no need to trash collection / has moved trash back to warehouse, going to fetch cargo
// 2 -> cache hit, going to cache to get cargo (add read lock)
// 3 -> cache cleared, going to warehouse to bring back cargo
// 4 -> warehouse get cargo, find empty block, going back to insert cache (get write lock)
// 5 -> warehouse get cargo, cannot find empty block, going back to unloading port
// 6 -> cache get cargo from cache, going back to unloading port
std::vector<uint> bit_status;

std::vector<int> agent_group; // agents group
Expand Down
11 changes: 11 additions & 0 deletions calmapf/include/utils.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -127,6 +127,17 @@ inline bool is_cache(CacheType cache_type) {
return cache_type != CacheType::NONE;
}

// Cache access result
struct CacheAccessResult {
bool result;
Vertex* goal;

inline CacheAccessResult(bool _result, Vertex* _goal) : result(_result), goal(_goal) {};
bool operator==(const CacheAccessResult& other) const {
return result == other.result && goal == other.goal;
}
};

template <>
struct fmt::formatter<std::vector<unsigned int>> {
// Presentation format: 'f' - fixed, 'e' - exponential.
Expand Down
99 changes: 63 additions & 36 deletions calmapf/src/cache.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ int Cache::_get_cache_evited_policy_index(const uint group) {
case CacheType::LRU:
for (uint i = 0; i < LRU[group].size(); i++) {
// If it's not locked and (it's the first element or the smallest so far)
if (bit_cache_insert_lock[group][i] == 0 && bit_cache_get_lock[group][i] == 0 && (min_value == -1 || LRU[group][i] < min_value)) {
if (bit_cache_insert_or_clear_lock[group][i] == 0 && bit_cache_get_lock[group][i] == 0 && (min_value == -1 || LRU[group][i] < min_value)) {
min_value = LRU[group][i];
min_index = i;
}
Expand All @@ -57,7 +57,7 @@ int Cache::_get_cache_evited_policy_index(const uint group) {
case CacheType::FIFO:
for (uint i = 0; i < FIFO[group].size(); i++) {
// If it's not blocked and (it's the first element or the smallest so far)
if (bit_cache_insert_lock[group][i] == 0 && bit_cache_get_lock[group][i] == 0 && (min_value == -1 || FIFO[group][i] < min_value)) {
if (bit_cache_insert_or_clear_lock[group][i] == 0 && bit_cache_get_lock[group][i] == 0 && (min_value == -1 || FIFO[group][i] < min_value)) {
min_value = FIFO[group][i];
min_index = i;
}
Expand All @@ -66,7 +66,7 @@ int Cache::_get_cache_evited_policy_index(const uint group) {
case CacheType::RANDOM:
for (uint i = 0; i < node_id[group].size(); i++) {
// If it's not blocked
if (bit_cache_insert_lock[group][i] == 0 && bit_cache_get_lock[group][i] == 0) {
if (bit_cache_insert_or_clear_lock[group][i] == 0 && bit_cache_get_lock[group][i] == 0) {
candidate.push_back(i);
}
}
Expand All @@ -92,7 +92,6 @@ int Cache::_get_cache_block_in_cache_position(Vertex* block) {
}

}

// Cache goals must in cache
assert(index != -1);
return index;
Expand Down Expand Up @@ -124,75 +123,88 @@ bool Cache::_is_cargo_in_coming_cache(Vertex* cargo) {
return false;
}

bool Cache::_is_garbage_collection(int group) {
  // Scan the group's occupancy flags; report whether any block is free.
  // NOTE(review): returns true when an empty block exists — confirm this is
  // the intended trigger condition for garbage collection.
  for (const auto block_is_empty : is_empty[group]) {
    if (block_is_empty) {
      return true;
    }
  }
  return false;
}

/**
 * @brief Look-ahead check: is this cargo immediately available from the cache?
 * @param cargo A pointer to the Vertex representing the cargo.
 * @return true if the cargo is cached and its block is not reserved for an
 *         insert or clear operation, false otherwise.
 */
bool Cache::look_ahead_cache(Vertex* cargo) {
  int cache_index = _get_cargo_in_cache_position(cargo);
  // A non-negative index means the cargo is resident in the cache; the block
  // must also be free of insert/clear reservations to be usable.
  if (cache_index >= 0 && bit_cache_insert_or_clear_lock[cargo->group][cache_index] == 0) return true;
  return false;
}

Vertex* Cache::try_cache_cargo(Vertex* cargo) {
CacheAccessResult Cache::try_cache_cargo(Vertex* cargo) {
int group = cargo->group;
int cache_index = _get_cargo_in_cache_position(cargo);

// If we can find cargo cached and is not reserved to be replaced , we go to cache and get it
if (cache_index >= 0 && bit_cache_insert_lock[cargo->group][cache_index] == 0) {
cache_console->debug("Cache hit! Agent will go {} to get cargo {}", *node_id[cargo->group][cache_index], *cargo);
// If we can find cargo cached, is not reserved to be replaced and is not reserved to be cleared, we go to cache and get it
if (cache_index >= 0 && bit_cache_insert_or_clear_lock[group][cache_index] == 0) {
cache_console->debug("Cache hit! Agent will go {} to get cargo {}", *node_id[group][cache_index], *cargo);
// For here, we allow multiple agents lock on cache get position
// It is impossible that a coming agent move cargo to this
// position while the cargo has already here
bit_cache_get_lock[cargo->group][cache_index] += 1;
bit_cache_get_lock[group][cache_index] += 1;
// We also update cache evicted policy statistics
_update_cache_evited_policy_statistics(cargo->group, cache_index, false);
_update_cache_evited_policy_statistics(group, cache_index, false);
// Update cargo number
node_cargo_num[cargo->group][cache_index] -= 1;
node_cargo_num[group][cache_index] -= 1;

return node_id[cargo->group][cache_index];
return CacheAccessResult(true, node_id[group][cache_index]);
}

// If we cannot find cargo cached, we directly go to warehouse
// cache_console->debug("Cache miss! Agent will directly to get cargo {}", *cargo);
return cargo;
return CacheAccessResult(false, cargo);
}

Vertex* Cache::try_insert_cache(Vertex* cargo, std::vector<Vertex*> port_list) {
CacheAccessResult Cache::try_insert_cache(Vertex* cargo, Vertex* unloading_port) {
int group = cargo->group;
Vertex* unloading_port = port_list[group];

// First, if cargo has already cached or is coming on the way, we directly go
// to unloading port, for simplify, we just check cache group here
if (_get_cargo_in_cache_position(cargo) != -2 || _is_cargo_in_coming_cache(cargo)) return unloading_port;
if (_get_cargo_in_cache_position(cargo) != -2 || _is_cargo_in_coming_cache(cargo)) return CacheAccessResult(false, nullptr);

// Second try to find a empty position to insert cargo
// TODO: optimization, can set a flag to skip this
for (uint i = 0; i < is_empty[group].size(); i++) {
if (is_empty[group][i]) {
cache_console->debug("Find an empty cache block with index {} {}", i, *node_id[group][i]);
cache_console->debug("Find an empty cache block with index {} {} to insert", i, *node_id[group][i]);
// We lock this position and update LRU info
bit_cache_insert_lock[group][i] += 1;
bit_cache_insert_or_clear_lock[group][i] += 1;
// Update coming cargo info
node_coming_cargo[group][i] = cargo;
// Update cache evited policy statistics
_update_cache_evited_policy_statistics(group, i, true);
// Set the position to be used
is_empty[group][i] = false;
return node_id[group][i];
return CacheAccessResult(true, node_id[group][i]);
}
}

// Third, try to find a LRU position that is not locked
int index = _get_cache_evited_policy_index(group);

// If we can find one, return the posititon
if (index != -1) {
// We lock this position and update LRU info
bit_cache_insert_lock[group][index] += 1;
// Updating coming cargo info
node_coming_cargo[group][index] = cargo;
_update_cache_evited_policy_statistics(group, index, true);
return node_id[group][index];
}
// There is no empty block, we can not insert into cache
return CacheAccessResult(false, unloading_port);
}

/**
 * @brief Attempt to find a garbage cargo to free one cache block.
 * @param cargo A pointer to the Vertex representing the cargo.
 * @return A CacheAccessResult: {true, cache block vertex} when garbage
 *         collection is needed and an evictable (unlocked) block was found
 *         and locked; {false, cargo} otherwise.
 */
CacheAccessResult Cache::try_cache_garbage_collection(Vertex* cargo) {
  int group = cargo->group;
  if (_is_garbage_collection(group)) {
    // Try to find a LRU position that is not locked
    int index = _get_cache_evited_policy_index(group);

    // If we can find one, return the position
    if (index != -1) {
      // We lock this position so no other agent inserts into or clears it
      bit_cache_insert_or_clear_lock[group][index] += 1;
      return CacheAccessResult(true, node_id[group][index]);
    }

    // Every candidate block is locked — nothing to collect this round
    return CacheAccessResult(false, cargo);
  }
  else {
    // No garbage collection needed for this group
    return CacheAccessResult(false, cargo);
  }
}

bool Cache::update_cargo_into_cache(Vertex* cargo, Vertex* cache_node) {
Expand All @@ -206,7 +218,7 @@ bool Cache::update_cargo_into_cache(Vertex* cargo, Vertex* cache_node) {
// Update cache
cache_console->debug("Update cargo {} to cache block {}", *cargo, *cache_node);
node_cargo[cache_node->group][cache_index] = cargo;
bit_cache_insert_lock[cache_node->group][cache_index] -= 1;
bit_cache_insert_or_clear_lock[cache_node->group][cache_index] -= 1;
node_cargo_num[cache_node->group][cache_index] = parser->agent_capacity - 1;
return true;
}
Expand All @@ -231,3 +243,18 @@ bool Cache::update_cargo_from_cache(Vertex* cargo, Vertex* cache_node) {
return true;
}

bool Cache::clear_cargo_from_cache(Vertex* cargo, Vertex* cache_node) {
  // Locate the cargo and the cache block it is being cleared from.
  int cargo_index = _get_cargo_in_cache_position(cargo);
  int cache_index = _get_cache_block_in_cache_position(cache_node);

  // The cargo must still be resident in the cache at this point.
  assert(cargo_index != -2);

  cache_console->debug("Agents clear {} from cache {}", *cargo, *cache_node);

  // Release the insert/clear reservation and mark the block empty again.
  bit_cache_insert_or_clear_lock[cache_node->group][cache_index] -= 1;
  is_empty[cache_node->group][cache_index] = true;

  return true;
}

2 changes: 1 addition & 1 deletion calmapf/src/graph.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -181,7 +181,7 @@ Graph::Graph(Parser* _parser) : parser(_parser)
cache->node_coming_cargo.push_back(tmp_cache_node);
cache->node_cargo_num.emplace_back(tmp_cache_node.size(), 0);
cache->bit_cache_get_lock.emplace_back(tmp_cache_node.size(), 0);
cache->bit_cache_insert_lock.emplace_back(tmp_cache_node.size(), 0);
cache->bit_cache_insert_or_clear_lock.emplace_back(tmp_cache_node.size(), 0);
cache->is_empty.emplace_back(tmp_cache_node.size(), true);
switch (parser->cache_type) {
case CacheType::LRU:
Expand Down
Loading

0 comments on commit ca19150

Please sign in to comment.