diff --git a/common/common.cmake b/common/common.cmake
new file mode 100644
index 000000000000..a60eb33af851
--- /dev/null
+++ b/common/common.cmake
@@ -0,0 +1,44 @@
+function(gpt4all_add_warning_options target)
+    if (MSVC)
+        return()
+    endif()
+    target_compile_options("${target}" PRIVATE
+        # base options
+        -Wall
+        -Wextra
+        # extra options
+        -Wcast-align
+        -Wextra-semi
+        -Wformat=2
+        -Wmissing-include-dirs
+        -Wnull-dereference
+        -Wstrict-overflow=2
+        -Wvla
+        # errors
+        -Werror=format-security
+        -Werror=init-self
+        -Werror=pointer-arith
+        -Werror=undef
+        # disabled warnings
+        -Wno-sign-compare
+        -Wno-unused-parameter
+        -Wno-unused-function
+        -Wno-unused-variable
+    )
+    if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+        target_compile_options("${target}" PRIVATE
+            -Wduplicated-branches
+            -Wduplicated-cond
+            -Wlogical-op
+            -Wno-reorder
+            -Wno-null-dereference
+        )
+    elseif (CMAKE_CXX_COMPILER_ID MATCHES "^(Apple)?Clang$")
+        target_compile_options("${target}" PRIVATE
+            -Wunreachable-code-break
+            -Wunreachable-code-return
+            -Werror=pointer-integer-compare
+            -Wno-reorder-ctor
+        )
+    endif()
+endfunction()
diff --git a/gpt4all-backend/CMakeLists.txt b/gpt4all-backend/CMakeLists.txt
index 4605b139fd22..4bb7035477ea 100644
--- a/gpt4all-backend/CMakeLists.txt
+++ b/gpt4all-backend/CMakeLists.txt
@@ -1,4 +1,7 @@
 cmake_minimum_required(VERSION 3.23) # for FILE_SET
+
+include(../common/common.cmake)
+
 set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
 set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
@@ -94,8 +97,6 @@ if (LLMODEL_ROCM)
     list(APPEND BUILD_VARIANTS rocm rocm-avxonly)
 endif()
 
-set(CMAKE_VERBOSE_MAKEFILE ON)
-
 # Go through each build variant
 foreach(BUILD_VARIANT IN LISTS BUILD_VARIANTS)
     # Determine flags
@@ -151,6 +152,7 @@ foreach(BUILD_VARIANT IN LISTS BUILD_VARIANTS)
     # Add each individual implementations
     add_library(llamamodel-mainline-${BUILD_VARIANT} SHARED
         src/llamamodel.cpp src/llmodel_shared.cpp)
+    gpt4all_add_warning_options(llamamodel-mainline-${BUILD_VARIANT})
     target_compile_definitions(llamamodel-mainline-${BUILD_VARIANT} PRIVATE
         LLAMA_VERSIONS=>=3 LLAMA_DATE=999999)
     target_include_directories(llamamodel-mainline-${BUILD_VARIANT} PRIVATE
@@ -169,6 +171,7 @@ add_library(llmodel
     src/llmodel_c.cpp
     src/llmodel_shared.cpp
 )
+gpt4all_add_warning_options(llmodel)
 target_sources(llmodel PUBLIC
     FILE_SET public_headers TYPE HEADERS BASE_DIRS include
     FILES include/gpt4all-backend/llmodel.h
diff --git a/gpt4all-backend/include/gpt4all-backend/llmodel.h b/gpt4all-backend/include/gpt4all-backend/llmodel.h
index 9ea14092149f..1b60dae6faa2 100644
--- a/gpt4all-backend/include/gpt4all-backend/llmodel.h
+++ b/gpt4all-backend/include/gpt4all-backend/llmodel.h
@@ -146,7 +146,7 @@ class LLModel {
     virtual bool supportsEmbedding() const = 0;
     virtual bool supportsCompletion() const = 0;
     virtual bool loadModel(const std::string &modelPath, int n_ctx, int ngl) = 0;
-    virtual bool isModelBlacklisted(const std::string &modelPath) const { (void)modelPath; return false; };
+    virtual bool isModelBlacklisted(const std::string &modelPath) const { (void)modelPath; return false; }
     virtual bool isEmbeddingModel(const std::string &modelPath) const { (void)modelPath; return false; }
     virtual bool isModelLoaded() const = 0;
     virtual size_t requiredMem(const std::string &modelPath, int n_ctx, int ngl) = 0;
diff --git a/gpt4all-backend/src/llmodel_shared.cpp b/gpt4all-backend/src/llmodel_shared.cpp
index b0c31d11e78b..c1e969d4b940 100644
--- a/gpt4all-backend/src/llmodel_shared.cpp
+++ b/gpt4all-backend/src/llmodel_shared.cpp
@@ -260,7 +260,7 @@ void LLModel::generateResponse(std::function<bool(int32_t, const std::string&)> responseCallback,
         cachedTokens.push_back(new_tok.value());
         cachedResponse += new_piece;
 
-        auto accept = [this, &promptCtx, &cachedTokens, &new_tok, allowContextShift]() -> bool {
+        auto accept = [this, &promptCtx, &new_tok, allowContextShift]() -> bool {
             // Shift context if out of space
             if (promptCtx.n_past >= promptCtx.n_ctx) {
                 (void)allowContextShift;
diff --git a/gpt4all-chat/CMakeLists.txt b/gpt4all-chat/CMakeLists.txt
index fff47c40383a..f696cdc475f4 100644
--- a/gpt4all-chat/CMakeLists.txt
+++ b/gpt4all-chat/CMakeLists.txt
@@ -1,5 +1,7 @@
 cmake_minimum_required(VERSION 3.25) # for try_compile SOURCE_FROM_VAR
+
+include(../common/common.cmake)
+
 set(APP_VERSION_MAJOR 3)
 set(APP_VERSION_MINOR 4)
 set(APP_VERSION_PATCH 1)
@@ -157,6 +159,7 @@ qt_add_executable(chat
     src/xlsxtomd.cpp src/xlsxtomd.h
     ${CHAT_EXE_RESOURCES}
 )
+gpt4all_add_warning_options(chat)
 
 qt_add_qml_module(chat
     URI gpt4all
diff --git a/gpt4all-chat/src/chat.h b/gpt4all-chat/src/chat.h
index da644c89e7e1..2d98322fef8d 100644
--- a/gpt4all-chat/src/chat.h
+++ b/gpt4all-chat/src/chat.h
@@ -33,7 +33,7 @@ class Chat : public QObject
     Q_PROPERTY(ResponseState responseState READ responseState NOTIFY responseStateChanged)
     Q_PROPERTY(QList<QString> collectionList READ collectionList NOTIFY collectionListChanged)
     Q_PROPERTY(QString modelLoadingError READ modelLoadingError NOTIFY modelLoadingErrorChanged)
-    Q_PROPERTY(QString tokenSpeed READ tokenSpeed NOTIFY tokenSpeedChanged);
+    Q_PROPERTY(QString tokenSpeed READ tokenSpeed NOTIFY tokenSpeedChanged)
     Q_PROPERTY(QString deviceBackend READ deviceBackend NOTIFY loadedModelInfoChanged)
     Q_PROPERTY(QString device READ device NOTIFY loadedModelInfoChanged)
     Q_PROPERTY(QString fallbackReason READ fallbackReason NOTIFY loadedModelInfoChanged)
diff --git a/gpt4all-chat/src/chatllm.cpp b/gpt4all-chat/src/chatllm.cpp
index 36c3a2694307..2b133ce760bf 100644
--- a/gpt4all-chat/src/chatllm.cpp
+++ b/gpt4all-chat/src/chatllm.cpp
@@ -585,7 +585,7 @@ bool ChatLLM::loadNewModel(const ModelInfo &modelInfo, QVariantMap &modelLoadProps)
     modelLoadProps.insert("$duration", modelLoadTimer.elapsed() / 1000.);
 
     return true;
-};
+}
 
 bool ChatLLM::isModelLoaded() const
 {
diff --git a/gpt4all-chat/src/chatmodel.h b/gpt4all-chat/src/chatmodel.h
index d50103151499..43b96246252b 100644
--- a/gpt4all-chat/src/chatmodel.h
+++ b/gpt4all-chat/src/chatmodel.h
@@ -65,8 +65,8 @@ struct ChatItem
     Q_PROPERTY(bool thumbsDownState MEMBER thumbsDownState)
     Q_PROPERTY(QList<ResultInfo> sources MEMBER sources)
     Q_PROPERTY(QList<ResultInfo> consolidatedSources MEMBER consolidatedSources)
-    Q_PROPERTY(QList<PromptAttachment> promptAttachments MEMBER promptAttachments);
-    Q_PROPERTY(QString promptPlusAttachments READ promptPlusAttachments);
+    Q_PROPERTY(QList<PromptAttachment> promptAttachments MEMBER promptAttachments)
+    Q_PROPERTY(QString promptPlusAttachments READ promptPlusAttachments)
 
 public:
     QString promptPlusAttachments() const
diff --git a/gpt4all-chat/src/database.cpp b/gpt4all-chat/src/database.cpp
index 9b1e9ecdb624..02261cb4c92c 100644
--- a/gpt4all-chat/src/database.cpp
+++ b/gpt4all-chat/src/database.cpp
@@ -296,10 +296,12 @@ static bool selectAllUncompletedChunks(QSqlQuery &q, QHash
diff --git a/gpt4all-chat/src/modellist.cpp b/gpt4all-chat/src/modellist.cpp
     if (discoverProgress() >= 1.0) {
         m_discoverInProgress = false;
-        emit discoverInProgressChanged();;
+        emit discoverInProgressChanged();
     }
 
     reply->deleteLater();
diff --git a/gpt4all-chat/src/mysettings.cpp b/gpt4all-chat/src/mysettings.cpp
index 97af196fa4ca..38c8ab6821f5 100644
--- a/gpt4all-chat/src/mysettings.cpp
+++ b/gpt4all-chat/src/mysettings.cpp
@@ -186,7 +186,7 @@ void MySettings::restoreModelDefaults(const ModelInfo &info)
     setModelTemperature(info, info.m_temperature);
     setModelTopP(info, info.m_topP);
     setModelMinP(info, info.m_minP);
-    setModelTopK(info, info.m_topK);;
+    setModelTopK(info, info.m_topK);
     setModelMaxLength(info, info.m_maxLength);
     setModelPromptBatchSize(info, info.m_promptBatchSize);
     setModelContextLength(info, info.m_contextLength);
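
Usage note: once `common/common.cmake` is included, a target opts into the shared warning set with a single call, as the `chat`, `llmodel`, and `llamamodel-mainline-*` targets do above. A minimal sketch of a consumer, with a hypothetical `mylib` target:

```cmake
# Pull in the helper; adjust the relative path to wherever this CMakeLists.txt lives.
include(../common/common.cmake)

# A hypothetical library target; executables are handled the same way.
add_library(mylib STATIC src/mylib.cpp)

# Attach the shared warning flags: -Wall/-Wextra plus the extra warnings and
# -Werror= promotions defined in common.cmake. Deliberately a no-op under MSVC,
# with a few compiler-specific additions for GCC and (Apple)Clang.
gpt4all_add_warning_options(mylib)
```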