A bit more cleanup to avoid duplicate testing and to separate the GOAP algorithm code from the little AI Manager thing.

master
Zed A. Shaw 3 weeks ago
parent b2c1b220ac
commit 3f83d3f0bb
  1. .gitignore (1)
  2. ai.cpp (124)
  3. ai.hpp (69)
  4. goap.cpp (127)
  5. goap.hpp (76)
  6. meson.build (5)
  7. tests/ai.cpp (99)
  8. tests/ai_fixture.json (85)

.gitignore vendored (1)

@@ -27,3 +27,4 @@ backup
*.dll
*.world
coverage
.venv

ai.cpp (124)

@@ -5,126 +5,6 @@ namespace ai {
using namespace nlohmann;
using namespace dbc;
bool is_subset(State& source, State& target) {
State result = source & target;
return result == target;
}
void Action::needs(int name, bool val) {
if(val) {
$positive_preconds[name] = true;
$negative_preconds[name] = false;
} else {
$negative_preconds[name] = true;
$positive_preconds[name] = false;
}
}
void Action::effect(int name, bool val) {
if(val) {
$positive_effects[name] = true;
$negative_effects[name] = false;
} else {
$negative_effects[name] = true;
$positive_effects[name] = false;
}
}
bool Action::can_effect(State& state) {
return ((state & $positive_preconds) == $positive_preconds) &&
((state & $negative_preconds) == ALL_ZERO);
}
State Action::apply_effect(State& state) {
return (state | $positive_effects) & ~$negative_effects;
}
int distance_to_goal(State& from, State& to) {
auto result = from ^ to;
return result.count();
}
Script reconstruct_path(std::unordered_map<Action, Action>& came_from, Action& current) {
Script total_path{current};
int count = 0;
while(came_from.contains(current) && count++ < 10) {
current = came_from.at(current);
if(current != FINAL_ACTION) {
total_path.push_front(current);
}
}
return total_path;
}
inline int h(State& start, State& goal) {
return distance_to_goal(start, goal);
}
inline int d(State& start, State& goal) {
return distance_to_goal(start, goal);
}
ActionState find_lowest(std::unordered_map<ActionState, int>& open_set) {
check(!open_set.empty(), "open set can't be empty in find_lowest");
const ActionState *result = nullptr;
int lowest_score = SCORE_MAX;
for(auto& kv : open_set) {
if(kv.second < lowest_score) {
lowest_score = kv.second;
result = &kv.first;
}
}
return *result;
}
std::optional<Script> plan_actions(std::vector<Action>& actions, State& start, State& goal) {
std::unordered_map<ActionState, int> open_set;
std::unordered_map<Action, Action> came_from;
std::unordered_map<State, int> g_score;
ActionState start_state{FINAL_ACTION, start};
g_score[start] = 0;
open_set[start_state] = g_score[start] + h(start, goal);
while(!open_set.empty()) {
auto current = find_lowest(open_set);
if(is_subset(current.state, goal)) {
return std::make_optional<Script>(reconstruct_path(came_from, current.action));
}
open_set.erase(current);
for(auto& neighbor_action : actions) {
// calculate the State being current/neighbor
if(!neighbor_action.can_effect(current.state)) {
continue;
}
auto neighbor = neighbor_action.apply_effect(current.state);
int d_score = d(current.state, neighbor);
int tentative_g_score = g_score[current.state] + d_score;
int neighbor_g_score = g_score.contains(neighbor) ? g_score[neighbor] : SCORE_MAX;
if(tentative_g_score < neighbor_g_score) {
came_from.insert_or_assign(neighbor_action, current.action);
g_score[neighbor] = tentative_g_score;
// open_set gets the fScore
ActionState neighbor_as{neighbor_action, neighbor};
open_set[neighbor_as] = tentative_g_score + h(neighbor, goal);
}
}
}
return std::nullopt;
}
static AIManager AIMGR;
static bool initialized = false;
@@ -180,9 +60,9 @@ namespace ai {
return result;
}
void init() {
void init(std::string config_path) {
initialized = true;
Config config("assets/ai.json");
Config config(config_path);
// profile specifies what keys (bitset indexes) are allowed
// and how they map to the bitset of State
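For context on that profile comment: the profile maps state names to bit indexes, a state config maps names to booleans, and config_state folds the two into the State bitset. A small sketch, assuming it behaves the way the tests in this commit use it:

#include "ai.hpp"

void profile_example() {
    // names -> bit indexes (the "profile"); indexes are illustrative
    auto profile = R"({
        "target_acquired": 0,
        "target_dead": 3
    })"_json;

    // names -> bool (one state description)
    auto config = R"({
        "target_acquired": true,
        "target_dead": false
    })"_json;

    ai::State state = ai::config_state(profile, config);
    // state[0] is now set, state[3] is clear
}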

ai.hpp (69)

@@ -6,62 +6,9 @@
#include <optional>
#include <nlohmann/json.hpp>
#include "config.hpp"
#include "goap.hpp"
namespace ai {
constexpr const int SCORE_MAX = std::numeric_limits<int>::max();
constexpr const size_t STATE_MAX = 32;
using State = std::bitset<STATE_MAX>;
const State ALL_ZERO;
const State ALL_ONES = ~ALL_ZERO;
struct Action {
std::string $name;
int $cost = 0;
State $positive_preconds;
State $negative_preconds;
State $positive_effects;
State $negative_effects;
Action(std::string name, int cost) :
$name(name), $cost(cost) { }
void needs(int name, bool val);
void effect(int name, bool val);
bool can_effect(State& state);
State apply_effect(State& state);
bool operator==(const Action& other) const {
return other.$name == $name;
}
};
using Script = std::deque<Action>;
const Action FINAL_ACTION("END", SCORE_MAX);
struct ActionState {
Action action;
State state;
ActionState(Action action, State state) :
action(action), state(state) {}
bool operator==(const ActionState& other) const {
return other.action == action && other.state == state;
}
};
bool is_subset(State& source, State& target);
int distance_to_goal(State& from, State& to);
std::optional<Script> plan_actions(std::vector<Action>& actions, State& start, State& goal);
struct AIManager {
nlohmann::json profile;
@@ -70,7 +17,7 @@ namespace ai {
std::unordered_map<std::string, std::vector<Action>> scripts;
};
void init();
void init(std::string config_path);
Action config_action(nlohmann::json& profile, nlohmann::json& config);
State config_state(nlohmann::json& profile, nlohmann::json& config);
@@ -82,15 +29,3 @@ namespace ai {
std::optional<Script> plan(std::string script_name, State start, State goal);
}
template<> struct std::hash<ai::Action> {
size_t operator()(const ai::Action& p) const {
return std::hash<std::string>{}(p.$name);
}
};
template<> struct std::hash<ai::ActionState> {
size_t operator()(const ai::ActionState& p) const {
return std::hash<ai::Action>{}(p.action) ^ std::hash<ai::State>{}(p.state);
}
};

goap.cpp (127)

@@ -0,0 +1,127 @@
#include "dbc.hpp"
#include "goap.hpp"
namespace ai {
using namespace nlohmann;
using namespace dbc;
bool is_subset(State& source, State& target) {
State result = source & target;
return result == target;
}
void Action::needs(int name, bool val) {
if(val) {
$positive_preconds[name] = true;
$negative_preconds[name] = false;
} else {
$negative_preconds[name] = true;
$positive_preconds[name] = false;
}
}
void Action::effect(int name, bool val) {
if(val) {
$positive_effects[name] = true;
$negative_effects[name] = false;
} else {
$negative_effects[name] = true;
$positive_effects[name] = false;
}
}
bool Action::can_effect(State& state) {
return ((state & $positive_preconds) == $positive_preconds) &&
((state & $negative_preconds) == ALL_ZERO);
}
State Action::apply_effect(State& state) {
return (state | $positive_effects) & ~$negative_effects;
}
int distance_to_goal(State& from, State& to) {
auto result = from ^ to;
return result.count();
}
Script reconstruct_path(std::unordered_map<Action, Action>& came_from, Action& current) {
Script total_path{current};
int count = 0;
while(came_from.contains(current) && count++ < 10) {
current = came_from.at(current);
if(current != FINAL_ACTION) {
total_path.push_front(current);
}
}
return total_path;
}
inline int h(State& start, State& goal) {
return distance_to_goal(start, goal);
}
inline int d(State& start, State& goal) {
return distance_to_goal(start, goal);
}
ActionState find_lowest(std::unordered_map<ActionState, int>& open_set) {
check(!open_set.empty(), "open set can't be empty in find_lowest");
const ActionState *result = nullptr;
int lowest_score = SCORE_MAX;
for(auto& kv : open_set) {
if(kv.second < lowest_score) {
lowest_score = kv.second;
result = &kv.first;
}
}
return *result;
}
std::optional<Script> plan_actions(std::vector<Action>& actions, State& start, State& goal) {
std::unordered_map<ActionState, int> open_set;
std::unordered_map<Action, Action> came_from;
std::unordered_map<State, int> g_score;
ActionState start_state{FINAL_ACTION, start};
g_score[start] = 0;
open_set[start_state] = g_score[start] + h(start, goal);
while(!open_set.empty()) {
auto current = find_lowest(open_set);
if(is_subset(current.state, goal)) {
return std::make_optional<Script>(reconstruct_path(came_from, current.action));
}
open_set.erase(current);
for(auto& neighbor_action : actions) {
// calculate the State being current/neighbor
if(!neighbor_action.can_effect(current.state)) {
continue;
}
auto neighbor = neighbor_action.apply_effect(current.state);
int d_score = d(current.state, neighbor);
int tentative_g_score = g_score[current.state] + d_score;
int neighbor_g_score = g_score.contains(neighbor) ? g_score[neighbor] : SCORE_MAX;
if(tentative_g_score < neighbor_g_score) {
came_from.insert_or_assign(neighbor_action, current.action);
g_score[neighbor] = tentative_g_score;
// open_set gets the fScore
ActionState neighbor_as{neighbor_action, neighbor};
open_set[neighbor_as] = tentative_g_score + h(neighbor, goal);
}
}
}
return std::nullopt;
}
}
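A quick illustration, separate from the commit, of the mask algebra can_effect and apply_effect implement: every positive precondition bit must be set, no negative precondition bit may be set, and applying an action ORs in its positive effects then clears its negative ones. The bit meanings in the comments are invented for the example:

#include "goap.hpp"
#include <cassert>

int main() {
    ai::Action reload{"reload", 1};
    reload.needs(0, true);    // bit 0 (say, "has_ammo_in_pack") must be 1
    reload.needs(1, false);   // bit 1 (say, "gun_loaded") must be 0
    reload.effect(1, true);   // sets bit 1
    reload.effect(0, false);  // clears bit 0

    ai::State s;
    s[0] = true;              // has ammo, gun not loaded

    assert(reload.can_effect(s));
    ai::State after = reload.apply_effect(s);
    assert(after[1] && !after[0]);   // loaded, ammo consumed
    return 0;
}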

goap.hpp (76)

@@ -0,0 +1,76 @@
#pragma once
#include <vector>
#include "matrix.hpp"
#include <bitset>
#include <limits>
#include <optional>
#include <nlohmann/json.hpp>
#include "config.hpp"
namespace ai {
constexpr const int SCORE_MAX = std::numeric_limits<int>::max();
constexpr const size_t STATE_MAX = 32;
using State = std::bitset<STATE_MAX>;
const State ALL_ZERO;
const State ALL_ONES = ~ALL_ZERO;
struct Action {
std::string $name;
int $cost = 0;
State $positive_preconds;
State $negative_preconds;
State $positive_effects;
State $negative_effects;
Action(std::string name, int cost) :
$name(name), $cost(cost) { }
void needs(int name, bool val);
void effect(int name, bool val);
bool can_effect(State& state);
State apply_effect(State& state);
bool operator==(const Action& other) const {
return other.$name == $name;
}
};
using Script = std::deque<Action>;
const Action FINAL_ACTION("END", SCORE_MAX);
struct ActionState {
Action action;
State state;
ActionState(Action action, State state) :
action(action), state(state) {}
bool operator==(const ActionState& other) const {
return other.action == action && other.state == state;
}
};
bool is_subset(State& source, State& target);
int distance_to_goal(State& from, State& to);
std::optional<Script> plan_actions(std::vector<Action>& actions, State& start, State& goal);
}
template<> struct std::hash<ai::Action> {
size_t operator()(const ai::Action& p) const {
return std::hash<std::string>{}(p.$name);
}
};
template<> struct std::hash<ai::ActionState> {
size_t operator()(const ai::ActionState& p) const {
return std::hash<ai::Action>{}(p.action) ^ std::hash<ai::State>{}(p.state);
}
};
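The two std::hash specializations are there because plan_actions keys its bookkeeping maps on Action and ActionState. A tiny sketch of that pattern, mirroring what the planner does internally:

#include "goap.hpp"
#include <unordered_map>

int main() {
    ai::Action search{"search", 5};
    ai::State start;                        // all zeros
    ai::ActionState key{search, start};

    std::unordered_map<ai::ActionState, int> open_set;
    open_set[key] = 3;                      // would hold the f-score in plan_actions

    std::unordered_map<ai::Action, ai::Action> came_from;
    came_from.insert_or_assign(search, ai::FINAL_ACTION);
    return 0;
}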

meson.build (5)

@@ -80,9 +80,8 @@ dependencies += [
sfml_window, ftxui_screen, ftxui_dom, ftxui_component
]
sources = [
'ai.cpp',
'ansi_parser.cpp',
'autowalker.cpp',
'boss_fight_ui.cpp',
@@ -93,7 +92,7 @@ sources = [
'config.cpp',
'dbc.cpp',
'devices.cpp',
'ai.cpp',
'goap.cpp',
'guecs.cpp',
'gui_fsm.cpp',
'inventory.cpp',

tests/ai.cpp (99)

@@ -103,105 +103,9 @@ TEST_CASE("basic feature tests", "[ai]") {
REQUIRE(state[ENEMY_DEAD]);
}
TEST_CASE("wargame test from cppAI", "[ai]") {
std::vector<ai::Action> actions;
auto profile = R"({
"target_acquired": 0,
"target_lost": 1,
"target_in_warhead_range": 2,
"target_dead": 3
})"_json;
// Now establish all the possible actions for the action pool
// In this example we're providing the AI some different FPS actions
auto config = R"({
"name": "searchSpiral",
"cost": 5,
"needs": {
"target_acquired": false,
"target_lost": true
},
"effects": {
"target_acquired": true
}
})"_json;
auto spiral = ai::config_action(profile, config);
actions.push_back(spiral);
config = R"({
"name": "searchSerpentine",
"cost": 5,
"needs": {
"target_acquired": false,
"target_lost": false
},
"effects": {
"target_acquired": true
}
})"_json;
auto serpentine = ai::config_action(profile, config);
actions.push_back(serpentine);
config = R"({
"name": "interceptTarget",
"cost": 5,
"needs": {
"target_acquired": true,
"target_dead": false
},
"effects": {
"target_in_warhead_range": true
}
})"_json;
auto intercept = ai::config_action(profile, config);
actions.push_back(intercept);
config = R"({
"name": "detonateNearTarget",
"cost": 5,
"needs": {
"target_in_warhead_range": true,
"target_acquired": true,
"target_dead": false
},
"effects": {
"target_dead": true
}
})"_json;
auto detonateNearTarget = ai::config_action(profile, config);
actions.push_back(detonateNearTarget);
// Here's the initial state...
config = R"({
"target_acquired": false,
"target_lost": true,
"target_in_warhead_range": false,
"target_dead": false
})"_json;
auto initial_state = ai::config_state(profile, config);
// ...and the goal state
config = R"({
"target_dead": true
})"_json;
auto goal_target_dead = ai::config_state(profile, config);
auto result = ai::plan_actions(actions, initial_state, goal_target_dead);
REQUIRE(result != std::nullopt);
auto state = initial_state;
for(auto& action : *result) {
fmt::println("ACTION: {}", action.$name);
state = action.apply_effect(state);
}
REQUIRE(state[profile["target_dead"]]);
}
TEST_CASE("ai as a module like sound/sprites", "[ai]") {
ai::init();
ai::init("tests/ai_fixture.json");
auto start = ai::load_state("test_start");
auto goal = ai::load_state("test_goal");
@@ -216,5 +120,4 @@ TEST_CASE("ai as a module like sound/sprites", "[ai]") {
}
REQUIRE(state[ai::state_id("target_dead")]);
}

tests/ai_fixture.json (85)

@@ -0,0 +1,85 @@
{
"profile": {
"target_acquired": 0,
"target_lost": 1,
"target_in_warhead_range": 2,
"target_dead": 3
},
"actions": [
{
"name": "searchSpiral",
"cost": 10,
"needs": {
"target_acquired": false,
"target_lost": true
},
"effects": {
"target_acquired": true
}
},
{
"name": "searchSerpentine",
"cost": 5,
"needs": {
"target_acquired": false,
"target_lost": false
},
"effects": {
"target_acquired": true
}
},
{
"name": "searchSpiral",
"cost": 5,
"needs": {
"target_acquired": false,
"target_lost": true
},
"effects": {
"target_acquired": true
}
},
{
"name": "interceptTarget",
"cost": 5,
"needs": {
"target_acquired": true,
"target_dead": false
},
"effects": {
"target_in_warhead_range": true
}
},
{
"name": "detonateNearTarget",
"cost": 5,
"needs": {
"target_in_warhead_range": true,
"target_acquired": true,
"target_dead": false
},
"effects": {
"target_dead": true
}
}
],
"states": {
"test_start": {
"target_acquired": false,
"target_lost": true,
"target_in_warhead_range": false,
"target_dead": false
},
"test_goal": {
"target_dead": true
}
},
"scripts": {
"test1": [
"searchSpiral",
"searchSerpentine",
"searchSpiral",
"interceptTarget",
"detonateNearTarget"]
}
}
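A sketch, assuming it follows the module-style test above, of how this fixture is meant to be consumed: init loads it, load_state pulls the named entries out of "states", and plan runs the planner over the "test1" action pool:

#include "ai.hpp"
#include <fmt/core.h>

int main() {
    ai::init("tests/ai_fixture.json");          // loads profile/actions/states/scripts

    auto start = ai::load_state("test_start");  // entries under "states" in the fixture
    auto goal  = ai::load_state("test_goal");

    // "test1" names the action pool defined under "scripts"
    auto plan = ai::plan("test1", start, goal);
    if(!plan) return 1;

    auto state = start;
    for(auto& action : *plan) {
        fmt::println("ACTION: {}", action.$name);
        state = action.apply_effect(state);
    }
    return state[ai::state_id("target_dead")] ? 0 : 1;
}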