chat: replace Jinja2Cpp with minja #3433

Merged
merged 3 commits · Jan 30, 2025
12 changes: 6 additions & 6 deletions .gitmodules
@@ -17,9 +17,9 @@
[submodule "gpt4all-chat/deps/QXlsx"]
path = gpt4all-chat/deps/QXlsx
url = https://github.com/nomic-ai/QXlsx.git
[submodule "gpt4all-chat/deps/Jinja2Cpp"]
path = gpt4all-chat/deps/Jinja2Cpp
url = https://github.com/nomic-ai/jinja2cpp.git
[submodule "gpt4all-chat/deps/rapidjson"]
path = gpt4all-chat/deps/rapidjson
url = https://github.com/nomic-ai/rapidjson.git
[submodule "gpt4all-chat/deps/minja"]
path = gpt4all-chat/deps/minja
url = https://github.com/nomic-ai/minja.git
[submodule "gpt4all-chat/deps/json"]
path = gpt4all-chat/deps/json
url = https://github.com/nlohmann/json.git
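
Note, as an aside to this diff: on an existing checkout, the swapped submodules typically need to be (re)initialized after pulling this change, e.g. with git submodule sync followed by git submodule update --init --recursive; cleaning up the removed Jinja2Cpp and rapidjson working trees may take an extra manual step.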
3 changes: 3 additions & 0 deletions gpt4all-chat/CHANGELOG.md
@@ -9,6 +9,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
### Added
- Support DeepSeek-R1 Qwen models ([#3431](https://github.com/nomic-ai/gpt4all/pull/3431))

### Changed
- Use minja instead of Jinja2Cpp for significantly improved template compatibility ([#3433](https://github.com/nomic-ai/gpt4all/pull/3433))

### Fixed
- Fix regression while using localdocs with server API ([#3410](https://github.com/nomic-ai/gpt4all/pull/3410))
- Don't show system messages in server chat view ([#3411](https://github.com/nomic-ai/gpt4all/pull/3411))
7 changes: 5 additions & 2 deletions gpt4all-chat/CMakeLists.txt
@@ -51,7 +51,7 @@ set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
set(CMAKE_CXX_STANDARD 23)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
if (MSVC)
# Enable accurate __cplusplus macro to fix errors in Jinja2Cpp
# Enable accurate __cplusplus macro
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/Zc:__cplusplus>)
endif()

@@ -437,7 +437,10 @@ else()
target_link_libraries(chat PRIVATE pdfium)
endif()
target_link_libraries(chat
PRIVATE llmodel SingleApplication fmt::fmt duckx::duckx QXlsx jinja2cpp)
PRIVATE llmodel SingleApplication fmt::fmt duckx::duckx QXlsx)
target_include_directories(chat PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/deps/json/include)
target_include_directories(chat PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/deps/json/include/nlohmann)
target_include_directories(chat PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/deps/minja/include)

if (APPLE)
target_link_libraries(chat PRIVATE ${COCOA_LIBRARY})
9 changes: 0 additions & 9 deletions gpt4all-chat/deps/CMakeLists.txt
@@ -15,15 +15,6 @@ add_subdirectory(DuckX)
set(QT_VERSION_MAJOR 6)
add_subdirectory(QXlsx/QXlsx)

# forked dependency of Jinja2Cpp
set(RAPIDJSON_BUILD_DOC OFF)
set(RAPIDJSON_BUILD_EXAMPLES OFF)
set(RAPIDJSON_BUILD_TESTS OFF)
set(RAPIDJSON_ENABLE_INSTRUMENTATION_OPT OFF)
add_subdirectory(rapidjson)

add_subdirectory(Jinja2Cpp)

if (NOT GPT4ALL_USING_QTPDF)
# If we do not use QtPDF, we need to get PDFium.
set(GPT4ALL_PDFIUM_TAG "chromium/6954")
1 change: 0 additions & 1 deletion gpt4all-chat/deps/Jinja2Cpp
Submodule Jinja2Cpp deleted from ce10f7
1 change: 1 addition & 0 deletions gpt4all-chat/deps/json
Submodule json added at 606b63
1 change: 1 addition & 0 deletions gpt4all-chat/deps/minja
Submodule minja added at 491f5c
1 change: 0 additions & 1 deletion gpt4all-chat/deps/rapidjson
Submodule rapidjson deleted from 9b547e
98 changes: 38 additions & 60 deletions gpt4all-chat/src/chatllm.cpp
@@ -12,12 +12,8 @@
#include "toolcallparser.h"

#include <fmt/format.h>

#include <jinja2cpp/error_info.h>
#include <jinja2cpp/template.h>
#include <jinja2cpp/template_env.h>
#include <jinja2cpp/user_callable.h>
#include <jinja2cpp/value.h>
#include <minja/minja.hpp>
#include <nlohmann/json.hpp>

#include <QDataStream>
#include <QDebug>
@@ -60,59 +56,40 @@
using namespace Qt::Literals::StringLiterals;
using namespace ToolEnums;
namespace ranges = std::ranges;
using json = nlohmann::ordered_json;

//#define DEBUG
//#define DEBUG_MODEL_LOADING

static std::string jinjaGetStringArg(const jinja2::ValuesMap &args, const std::string &name)
{
auto arg = args.find(name);
if (arg == args.end() || !arg->second.isString())
throw std::runtime_error(fmt::format("'{}' argument to raise_exception() must be a string", name));
return arg->second.asString();
}

// NOTE: not threadsafe
static jinja2::TemplateEnv *jinjaEnv()
static const std::shared_ptr<minja::Context> &jinjaEnv()
{
static std::optional<jinja2::TemplateEnv> environment;
static std::shared_ptr<minja::Context> environment;
if (!environment) {
auto &env = environment.emplace();
auto &settings = env.GetSettings();
settings.trimBlocks = true;
settings.lstripBlocks = true;
env.AddGlobal("raise_exception", jinja2::UserCallable(
/*callable*/ [](auto &params) -> jinja2::Value {
auto message = jinjaGetStringArg(params.args, "message");
throw std::runtime_error(fmt::format("Jinja template error: {}", message));
},
/*argsInfo*/ { jinja2::ArgInfo("message", /*isMandatory*/ true) }
));
env.AddGlobal("strftime_now", jinja2::UserCallable(
/*callable*/ [](auto &params) -> jinja2::Value {
environment = minja::Context::builtins();
environment->set("strftime_now", minja::simple_function(
"strftime_now", { "format" },
[](const std::shared_ptr<minja::Context> &, minja::Value &args) -> minja::Value {
auto format = args.at("format").get<std::string>();
using Clock = std::chrono::system_clock;
auto format = jinjaGetStringArg(params.args, "format");
time_t nowUnix = Clock::to_time_t(Clock::now());
auto localDate = *std::localtime(&nowUnix);
std::ostringstream ss;
ss << std::put_time(&localDate, format.c_str());
return ss.str();
},
/*argsInfo*/ { jinja2::ArgInfo("format", /*isMandatory*/ true) }
}
));
env.AddGlobal("regex_replace", jinja2::UserCallable(
/*callable*/ [](auto &params) -> jinja2::Value {
auto str = jinjaGetStringArg(params.args, "str" );
auto pattern = jinjaGetStringArg(params.args, "pattern");
auto repl = jinjaGetStringArg(params.args, "repl" );
environment->set("regex_replace", minja::simple_function(
"regex_replace", { "str", "pattern", "repl" },
[](const std::shared_ptr<minja::Context> &, minja::Value &args) -> minja::Value {
auto str = args.at("str" ).get<std::string>();
auto pattern = args.at("pattern").get<std::string>();
auto repl = args.at("repl" ).get<std::string>();
return std::regex_replace(str, std::regex(pattern), repl);
},
/*argsInfo*/ { jinja2::ArgInfo("str", /*isMandatory*/ true),
jinja2::ArgInfo("pattern", /*isMandatory*/ true),
jinja2::ArgInfo("repl", /*isMandatory*/ true) }
}
));
}
return &*environment;
return environment;
}

class LLModelStore {
@@ -772,19 +749,18 @@ static uint parseJinjaTemplateVersion(QStringView tmpl)
return 0;
}

static auto loadJinjaTemplate(
std::optional<jinja2::Template> &tmpl /*out*/, const std::string &source
) -> jinja2::Result<void>
static std::shared_ptr<minja::TemplateNode> loadJinjaTemplate(const std::string &source)
{
tmpl.emplace(jinjaEnv());
return tmpl->Load(source);
return minja::Parser::parse(source, { .trim_blocks = true, .lstrip_blocks = true, .keep_trailing_newline = false });
}

std::optional<std::string> ChatLLM::checkJinjaTemplateError(const std::string &source)
{
std::optional<jinja2::Template> tmpl;
if (auto res = loadJinjaTemplate(tmpl, source); !res)
return res.error().ToString();
try {
loadJinjaTemplate(source);
} catch (const std::runtime_error &e) {
return e.what();
}
return std::nullopt;
}

@@ -816,13 +792,13 @@ std::string ChatLLM::applyJinjaTemplate(std::span<const MessageItem> items) cons
uint version = parseJinjaTemplateVersion(chatTemplate);

auto makeMap = [version](const MessageItem &item) {
return jinja2::GenericMap([msg = std::make_shared<JinjaMessage>(version, item)] { return msg.get(); });
return JinjaMessage(version, item).AsJson();
};

std::unique_ptr<MessageItem> systemItem;
bool useSystem = !isAllSpace(systemMessage);

jinja2::ValuesList messages;
json::array_t messages;
messages.reserve(useSystem + items.size());
if (useSystem) {
systemItem = std::make_unique<MessageItem>(MessageItem::Type::System, systemMessage.toUtf8());
@@ -831,27 +807,29 @@ std::string ChatLLM::applyJinjaTemplate(std::span<const MessageItem> items) cons
for (auto &item : items)
messages.emplace_back(makeMap(item));

jinja2::ValuesList toolList;
json::array_t toolList;
const int toolCount = ToolModel::globalInstance()->count();
for (int i = 0; i < toolCount; ++i) {
Tool *t = ToolModel::globalInstance()->get(i);
toolList.push_back(t->jinjaValue());
}

jinja2::ValuesMap params {
json::object_t params {
{ "messages", std::move(messages) },
{ "add_generation_prompt", true },
{ "toolList", toolList },
};
for (auto &[name, token] : model->specialTokens())
params.emplace(std::move(name), std::move(token));

std::optional<jinja2::Template> tmpl;
auto maybeRendered = loadJinjaTemplate(tmpl, chatTemplate.toStdString())
.and_then([&] { return tmpl->RenderAsString(params); });
if (!maybeRendered)
throw std::runtime_error(fmt::format("Failed to parse chat template: {}", maybeRendered.error().ToString()));
return *maybeRendered;
try {
auto tmpl = loadJinjaTemplate(chatTemplate.toStdString());
auto context = minja::Context::make(minja::Value(std::move(params)), jinjaEnv());
return tmpl->render(context);
} catch (const std::runtime_error &e) {
throw std::runtime_error(fmt::format("Failed to parse chat template: {}", e.what()));
}
Q_UNREACHABLE();
}

auto ChatLLM::promptInternalChat(const QStringList &enabledCollections, const LLModel::PromptContext &ctx,
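
For orientation, here is a minimal standalone sketch of the new rendering path, using only the minja calls that appear in this diff (minja::Parser::parse, minja::Context::builtins/make, render); the template string and values are invented for illustration. Both minja and nlohmann::json are consumed as header-only includes, which is why CMakeLists.txt above now adds include directories instead of linking a jinja2cpp target.

#include <minja/minja.hpp>
#include <nlohmann/json.hpp>

#include <iostream>
#include <utility>

using json = nlohmann::ordered_json;

int main()
{
    // Parse a (made-up) chat template with the same options this PR passes to minja.
    auto tmpl = minja::Parser::parse(
        "{%- for m in messages %}<|{{ m.role }}|>{{ m.content }}\n{%- endfor %}"
        "{%- if add_generation_prompt %}<|assistant|>{% endif %}",
        { .trim_blocks = true, .lstrip_blocks = true, .keep_trailing_newline = false });

    // Template variables are ordinary nlohmann JSON wrapped in a minja::Value,
    // mirroring how applyJinjaTemplate() builds its params object above.
    json user_msg { { "role", "user" }, { "content", "Hi" } };
    json::object_t params {
        { "messages", json::array({ user_msg }) },
        { "add_generation_prompt", true },
    };

    // Context::builtins() plays the role the Jinja2Cpp TemplateEnv used to play;
    // chatllm.cpp additionally registers strftime_now and regex_replace on it.
    auto context = minja::Context::make(minja::Value(std::move(params)), minja::Context::builtins());
    std::cout << tmpl->render(context) << '\n';
    return 0;
}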
154 changes: 59 additions & 95 deletions gpt4all-chat/src/jinja_helpers.cpp
@@ -7,111 +7,75 @@
#include <QString>
#include <QUrl>

#include <memory>
#include <iterator>
#include <map>
#include <ranges>
#include <vector>

using namespace std::literals::string_view_literals;
namespace views = std::views;
using json = nlohmann::ordered_json;


JinjaResultInfo::~JinjaResultInfo() = default;

const JinjaFieldMap<ResultInfo> JinjaResultInfo::s_fields = {
{ "collection", [](auto &s) { return s.collection.toStdString(); } },
{ "path", [](auto &s) { return s.path .toStdString(); } },
{ "file", [](auto &s) { return s.file .toStdString(); } },
{ "title", [](auto &s) { return s.title .toStdString(); } },
{ "author", [](auto &s) { return s.author .toStdString(); } },
{ "date", [](auto &s) { return s.date .toStdString(); } },
{ "text", [](auto &s) { return s.text .toStdString(); } },
{ "page", [](auto &s) { return s.page; } },
{ "file_uri", [](auto &s) { return s.fileUri() .toStdString(); } },
};

JinjaPromptAttachment::~JinjaPromptAttachment() = default;

const JinjaFieldMap<PromptAttachment> JinjaPromptAttachment::s_fields = {
{ "url", [](auto &s) { return s.url.toString() .toStdString(); } },
{ "file", [](auto &s) { return s.file() .toStdString(); } },
{ "processed_content", [](auto &s) { return s.processedContent().toStdString(); } },
};

std::vector<std::string> JinjaMessage::GetKeys() const
json::object_t JinjaResultInfo::AsJson() const
{
std::vector<std::string> result;
auto &keys = this->keys();
result.reserve(keys.size());
result.assign(keys.begin(), keys.end());
return result;
return {
{ "collection", m_source->collection.toStdString() },
{ "path", m_source->path .toStdString() },
{ "file", m_source->file .toStdString() },
{ "title", m_source->title .toStdString() },
{ "author", m_source->author .toStdString() },
{ "date", m_source->date .toStdString() },
{ "text", m_source->text .toStdString() },
{ "page", m_source->page },
{ "file_uri", m_source->fileUri() .toStdString() },
};
}

auto JinjaMessage::keys() const -> const std::unordered_set<std::string_view> &
json::object_t JinjaPromptAttachment::AsJson() const
{
static const std::unordered_set<std::string_view> baseKeys
{ "role", "content" };
static const std::unordered_set<std::string_view> userKeys
{ "role", "content", "sources", "prompt_attachments" };
switch (m_item->type()) {
using enum MessageItem::Type;
case System:
case Response:
case ToolResponse:
return baseKeys;
case Prompt:
return userKeys;
break;
}
Q_UNREACHABLE();
return {
{ "url", m_attachment->url.toString() .toStdString() },
{ "file", m_attachment->file() .toStdString() },
{ "processed_content", m_attachment->processedContent().toStdString() },
};
}

bool operator==(const JinjaMessage &a, const JinjaMessage &b)
json::object_t JinjaMessage::AsJson() const
{
if (a.m_item == b.m_item)
return true;
const auto &[ia, ib] = std::tie(*a.m_item, *b.m_item);
auto type = ia.type();
if (type != ib.type() || ia.content() != ib.content())
return false;

switch (type) {
using enum MessageItem::Type;
case System:
case Response:
case ToolResponse:
return true;
case Prompt:
return ia.sources() == ib.sources() && ia.promptAttachments() == ib.promptAttachments();
break;
}
Q_UNREACHABLE();
}

const JinjaFieldMap<JinjaMessage> JinjaMessage::s_fields = {
{ "role", [](auto &m) {
switch (m.item().type()) {
json::object_t obj;
{
json::string_t role;
switch (m_item->type()) {
using enum MessageItem::Type;
case System: return "system"sv;
case Prompt: return "user"sv;
case Response: return "assistant"sv;
case ToolResponse: return "tool"sv;
break;
case System: role = "system"; break;
case Prompt: role = "user"; break;
case Response: role = "assistant"; break;
case ToolResponse: role = "tool"; break;
}
obj.emplace_back("role", std::move(role));
}
{
QString content;
if (m_version == 0 && m_item->type() == MessageItem::Type::Prompt) {
content = m_item->bakedPrompt();
} else {
content = m_item->content();
}
obj.emplace_back("content", content.toStdString());
}
if (m_item->type() == MessageItem::Type::Prompt) {
{
auto sources = m_item->sources() | views::transform([](auto &r) {
return JinjaResultInfo(r).AsJson();
});
obj.emplace("sources", json::array_t(sources.begin(), sources.end()));
}
{
auto attachments = m_item->promptAttachments() | views::transform([](auto &pa) {
return JinjaPromptAttachment(pa).AsJson();
});
obj.emplace("prompt_attachments", json::array_t(attachments.begin(), attachments.end()));
}
Q_UNREACHABLE();
} },
{ "content", [](auto &m) {
if (m.version() == 0 && m.item().type() == MessageItem::Type::Prompt)
return m.item().bakedPrompt().toStdString();
return m.item().content().toStdString();
} },
{ "sources", [](auto &m) {
auto sources = m.item().sources() | views::transform([](auto &r) {
return jinja2::GenericMap([map = std::make_shared<JinjaResultInfo>(r)] { return map.get(); });
});
return jinja2::ValuesList(sources.begin(), sources.end());
} },
{ "prompt_attachments", [](auto &m) {
auto attachments = m.item().promptAttachments() | views::transform([](auto &pa) {
return jinja2::GenericMap([map = std::make_shared<JinjaPromptAttachment>(pa)] { return map.get(); });
});
return jinja2::ValuesList(attachments.begin(), attachments.end());
} },
};
}
return obj;
}
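
Illustratively (field values invented): after this change a Prompt-type message reaches the template as a plain JSON object rather than a jinja2::GenericMap, roughly of this shape; System, Response, and ToolResponse messages carry only "role" and "content", matching the old keys() distinction.

{
  "role": "user",
  "content": "What does the attached report say?",
  "sources": [
    { "collection": "...", "path": "...", "file": "...", "title": "...", "author": "...",
      "date": "...", "text": "...", "page": 1, "file_uri": "..." }
  ],
  "prompt_attachments": [
    { "url": "...", "file": "...", "processed_content": "..." }
  ]
}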
81 changes: 8 additions & 73 deletions gpt4all-chat/src/jinja_helpers.h
@@ -3,114 +3,49 @@
#include "chatmodel.h"
#include "database.h"

#include <jinja2cpp/value.h>

#include <functional>
#include <ranges>
#include <string>
#include <string_view>
#include <unordered_map>
#include <unordered_set>
#include <nlohmann/json.hpp>

#include <QtGlobal>

namespace views = std::views;

using json = nlohmann::ordered_json;

template <typename T>
using JinjaFieldMap = std::unordered_map<std::string_view, std::function<jinja2::Value (const T &)>>;

template <typename Derived>
class JinjaComparable : public jinja2::IMapItemAccessor {
class JinjaHelper {
public:
JinjaComparable() = default;

bool IsEqual(const jinja2::IComparable &other) const override;

private:
Q_DISABLE_COPY_MOVE(JinjaComparable)
};

template <typename Derived>
class JinjaHelper : public JinjaComparable<Derived> {
public:
size_t GetSize() const override
{ return Derived::s_fields.size(); }

bool HasValue(const std::string &name) const override
{ return Derived::s_fields.contains(name); }

jinja2::Value GetValueByName(const std::string &name) const override;

std::vector<std::string> GetKeys() const override
{ auto keys = views::elements<0>(Derived::s_fields); return { keys.begin(), keys.end() }; }
json::object_t AsJson() const { return static_cast<const Derived *>(this)->AsJson(); }
};

class JinjaResultInfo : public JinjaHelper<JinjaResultInfo> {
public:
explicit JinjaResultInfo(const ResultInfo &source) noexcept
: m_source(&source) {}

~JinjaResultInfo() override;

const ResultInfo &value() const { return *m_source; }

friend bool operator==(const JinjaResultInfo &a, const JinjaResultInfo &b)
{ return a.m_source == b.m_source || *a.m_source == *b.m_source; }
json::object_t AsJson() const;

private:
static const JinjaFieldMap<ResultInfo> s_fields;
const ResultInfo *m_source;

friend class JinjaHelper<JinjaResultInfo>;
};

class JinjaPromptAttachment : public JinjaHelper<JinjaPromptAttachment> {
public:
explicit JinjaPromptAttachment(const PromptAttachment &attachment) noexcept
: m_attachment(&attachment) {}

~JinjaPromptAttachment() override;

const PromptAttachment &value() const { return *m_attachment; }

friend bool operator==(const JinjaPromptAttachment &a, const JinjaPromptAttachment &b)
{ return a.m_attachment == b.m_attachment || *a.m_attachment == *b.m_attachment; }
json::object_t AsJson() const;

private:
static const JinjaFieldMap<PromptAttachment> s_fields;
const PromptAttachment *m_attachment;

friend class JinjaHelper<JinjaPromptAttachment>;
};

class JinjaMessage : public JinjaHelper<JinjaMessage> {
public:
explicit JinjaMessage(uint version, const MessageItem &item) noexcept
: m_version(version), m_item(&item) {}

const JinjaMessage &value () const { return *this; }
uint version() const { return m_version; }
const MessageItem &item () const { return *m_item; }

size_t GetSize() const override { return keys().size(); }
bool HasValue(const std::string &name) const override { return keys().contains(name); }

jinja2::Value GetValueByName(const std::string &name) const override
{ return HasValue(name) ? JinjaHelper::GetValueByName(name) : jinja2::EmptyValue(); }

std::vector<std::string> GetKeys() const override;

private:
auto keys() const -> const std::unordered_set<std::string_view> &;
json::object_t AsJson() const;

private:
static const JinjaFieldMap<JinjaMessage> s_fields;
uint m_version;
uint m_version;
const MessageItem *m_item;

friend class JinjaHelper<JinjaMessage>;
friend bool operator==(const JinjaMessage &a, const JinjaMessage &b);
};

#include "jinja_helpers.inl"
17 changes: 0 additions & 17 deletions gpt4all-chat/src/jinja_helpers.inl

This file was deleted.

19 changes: 1 addition & 18 deletions gpt4all-chat/src/jinja_replacements.cpp
@@ -5,26 +5,9 @@
// This is a list of prompt templates known to GPT4All and their associated replacements which are automatically used
// instead when loading the chat template from GGUF. These exist for two primary reasons:
// - HuggingFace model authors make ugly chat templates because they do not expect the end user to see them;
// - and our Jinja2Cpp-based template parsing is not fully compatible with HuggingFace transformers and jinja2.

// Below is a list of known incompatibilities with the official HF jinja2 implementation. These are not all necessarily
// reflected in the below substitution list, and this cannot be an exhaustive list because there are a plethora of edge
// cases in template parsing in which jinja2 and Jinja2Cpp differ. These are differences that could be reasonably
// expected to affect chat templates that could be seen in the wild, or that cause a crash:
// - Jinja2Cpp crashes (in debug builds) if given the template `a[""(`
// - Jinja2Cpp does not support these jinja2 constructs:
// - `is not none`
// - list slicing, e.g. `messages[1:]`
// - the jinja2.ext.loopcontrols extension, which HF enables by default
// - a missing space after a quote in substitution (e.g. `{{ 'foo'}}`), which *has* been seen in the wild
// - GPT4All does not currently support these HuggingFace template features:
// - customized "tojson" filter (we provide the built-in Jinja2Cpp one)
// - the AssistantTracker extension

// - and chat templates occasionally use features we do not support. This is less true now that we use minja.

// The substitution list.
// For templates that apply to models listed in models3.json, these should be copied there as well for best
// compatibility with older versions of GPT4All.

const std::unordered_map<std::string_view, std::string_view> CHAT_TEMPLATE_SUBSTITUTIONS {
// calme-2.1-phi3.5-4b.Q6_K.gguf (reported by ThilotE on Discord), Phi-3.5-mini-instruct-Q4_0.gguf (nomic-ai/gpt4all#3345)
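
As an illustration of why the incompatibility list above could be dropped: the removed comment called out constructs such as list slicing and "is not none", which show up in real HuggingFace chat templates along the lines of the following made-up snippet and are expected to parse under minja.

{%- if messages[0]['role'] == 'system' %}{{ messages[0]['content'] }}{% endif %}
{%- for message in messages[1:] %}
{%- if message['content'] is not none %}{{ message['role'] }}: {{ message['content'] }}{% endif %}
{%- endfor %}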
37 changes: 18 additions & 19 deletions gpt4all-chat/src/tool.cpp
@@ -1,12 +1,13 @@
#include "tool.h"

#include <jinja2cpp/value.h>

#include <string>

jinja2::Value Tool::jinjaValue() const
using json = nlohmann::ordered_json;


json::object_t Tool::jinjaValue() const
{
jinja2::ValuesList paramList;
json::array_t paramList;
const QList<ToolParamInfo> p = parameters();
for (auto &info : p) {
std::string typeStr;
@@ -20,26 +21,24 @@ jinja2::Value Tool::jinjaValue() const
case Boolean: typeStr = "boolean"; break;
case Null: typeStr = "null"; break;
}
jinja2::ValuesMap infoMap {
{ "name", info.name.toStdString() },
{ "type", typeStr},
paramList.emplace_back(json::initializer_list_t {
{ "name", info.name.toStdString() },
{ "type", typeStr },
{ "description", info.description.toStdString() },
{ "required", info.required }
};
paramList.push_back(infoMap);
{ "required", info.required },
});
}

jinja2::ValuesMap params {
{ "name", name().toStdString() },
{ "description", description().toStdString() },
{ "function", function().toStdString() },
{ "parameters", paramList },
return {
{ "name", name().toStdString() },
{ "description", description().toStdString() },
{ "function", function().toStdString() },
{ "parameters", paramList },
{ "symbolicFormat", symbolicFormat().toStdString() },
{ "examplePrompt", examplePrompt().toStdString() },
{ "exampleCall", exampleCall().toStdString() },
{ "exampleReply", exampleReply().toStdString() }
{ "examplePrompt", examplePrompt().toStdString() },
{ "exampleCall", exampleCall().toStdString() },
{ "exampleReply", exampleReply().toStdString() },
};
return params;
}

void ToolCallInfo::serialize(QDataStream &stream, int version)
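
Similarly illustrative (values invented): a Tool now surfaces to the template as a JSON object of roughly this shape instead of a jinja2::ValuesMap.

{
  "name": "...",
  "description": "...",
  "function": "...",
  "parameters": [
    { "name": "...", "type": "string", "description": "...", "required": true }
  ],
  "symbolicFormat": "...",
  "examplePrompt": "...",
  "exampleCall": "...",
  "exampleReply": "..."
}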
7 changes: 5 additions & 2 deletions gpt4all-chat/src/tool.h
@@ -1,13 +1,16 @@
#ifndef TOOL_H
#define TOOL_H

#include <nlohmann/json.hpp>

#include <QList>
#include <QObject>
#include <QString>
#include <QVariant>
#include <QtGlobal>

#include <jinja2cpp/value.h>
using json = nlohmann::ordered_json;


namespace ToolEnums
{
@@ -122,7 +125,7 @@ class Tool : public QObject

bool operator==(const Tool &other) const { return function() == other.function(); }

jinja2::Value jinjaValue() const;
json::object_t jinjaValue() const;

Q_SIGNALS:
void runComplete(const ToolCallInfo &info);