modellist: fix models.json cache location (nomic-ai#3052)
Signed-off-by: Jared Van Bortel <[email protected]>
cebtenzzre authored Oct 9, 2024
1 parent 0d9b4f0 commit 3165e1d
Showing 2 changed files with 50 additions and 27 deletions.
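
In short: before this patch the downloaded models.json was cached as models.json in the same directory as the QSettings ini file (the settings/config directory); the patch moves the cache into the standard per-user cache directory (QStandardPaths::CacheLocation) and versions the cached filename (models3.json) to match the versioned download URL. The snippet below is a minimal standalone sketch, not part of the commit, of the lookup pattern the new modelsJsonCacheFile() helper relies on: QStandardPaths::locate() to find an existing cached copy, then QStandardPaths::writableLocation() for the directory a fresh copy would go to. The organization/application names are placeholders, not the values GPT4All actually sets.

```cpp
// Standalone sketch (not part of this commit) of the cache-location lookup pattern.
// Organization/application names below are placeholders.
#include <QCoreApplication>
#include <QDebug>
#include <QStandardPaths>

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);
    // QStandardPaths::CacheLocation is derived from these names.
    QCoreApplication::setOrganizationName(QStringLiteral("ExampleOrg"));
    QCoreApplication::setApplicationName(QStringLiteral("ExampleApp"));

    const QString fname = QStringLiteral("models3.json");

    // 1. Prefer an existing cached copy found in any standard cache directory.
    const QString existing = QStandardPaths::locate(QStandardPaths::CacheLocation, fname);
    qDebug() << "existing cache copy:" << (existing.isEmpty() ? QStringLiteral("<none>") : existing);

    // 2. Otherwise, this is the directory a fresh copy would be written to
    //    (somewhere under ~/.cache on Linux, ~/Library/Caches on macOS, etc.).
    qDebug() << "writable cache dir:" << QStandardPaths::writableLocation(QStandardPaths::CacheLocation);
    return 0;
}
```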
6 changes: 6 additions & 0 deletions gpt4all-chat/CHANGELOG.md
@@ -4,6 +4,11 @@ All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).

+## [Unreleased]
+
+### Fixed
+- Fix models.json cache location ([#3052](https://github.com/nomic-ai/gpt4all/pull/3052))
+
## [3.4.0] - 2024-10-08

### Added
@@ -147,6 +152,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
- Fix several Vulkan resource management issues ([#2694](https://github.com/nomic-ai/gpt4all/pull/2694))
- Fix crash/hang when some models stop generating, by showing special tokens ([#2701](https://github.com/nomic-ai/gpt4all/pull/2701))

+[Unreleased]: https://github.com/nomic-ai/gpt4all/compare/v3.4.0...HEAD
[3.4.0]: https://github.com/nomic-ai/gpt4all/compare/v3.3.0...v3.4.0
[3.3.1]: https://github.com/nomic-ai/gpt4all/compare/v3.3.0...v3.3.1
[3.3.0]: https://github.com/nomic-ai/gpt4all/compare/v3.2.1...v3.3.0
71 changes: 44 additions & 27 deletions gpt4all-chat/src/modellist.cpp
@@ -27,6 +27,7 @@
#include <QSettings>
#include <QSslConfiguration>
#include <QSslSocket>
+#include <QStandardPaths>
#include <QStringList>
#include <QTextStream>
#include <QTimer>
@@ -36,13 +37,16 @@
#include <algorithm>
#include <cstddef>
#include <iterator>
+#include <optional>
#include <string>
#include <utility>

using namespace Qt::Literals::StringLiterals;

//#define USE_LOCAL_MODELSJSON

+#define MODELS_JSON_VERSION "3"
+
static const QStringList FILENAME_BLACKLIST { u"gpt4all-nomic-embed-text-v1.rmodel"_s };

QString ModelInfo::id() const
@@ -1331,15 +1335,32 @@ void ModelList::updateModelsFromDirectory()
}
}

-#define MODELS_VERSION 3
+static QString modelsJsonFilename()
+{
+return QStringLiteral("models" MODELS_JSON_VERSION ".json");
+}
+
+static std::optional<QFile> modelsJsonCacheFile()
+{
+constexpr auto loc = QStandardPaths::CacheLocation;
+QString modelsJsonFname = modelsJsonFilename();
+if (auto path = QStandardPaths::locate(loc, modelsJsonFname); !path.isEmpty())
+return std::make_optional<QFile>(path);
+if (auto path = QStandardPaths::writableLocation(loc); !path.isEmpty())
+return std::make_optional<QFile>(path + u'/' + modelsJsonFname);
+return std::nullopt;
+}

void ModelList::updateModelsFromJson()
{
+QString modelsJsonFname = modelsJsonFilename();

#if defined(USE_LOCAL_MODELSJSON)
-QUrl jsonUrl("file://" + QDir::homePath() + u"/dev/large_language_models/gpt4all/gpt4all-chat/metadata/models%1.json"_s.arg(MODELS_VERSION));
+QUrl jsonUrl(u"file://%1/dev/large_language_models/gpt4all/gpt4all-chat/metadata/%2"_s.arg(QDir::homePath(), modelsJsonFname));
#else
-QUrl jsonUrl(u"http://gpt4all.io/models/models%1.json"_s.arg(MODELS_VERSION));
+QUrl jsonUrl(u"http://gpt4all.io/models/%1"_s.arg(modelsJsonFname));
#endif

QNetworkRequest request(jsonUrl);
QSslConfiguration conf = request.sslConfiguration();
conf.setPeerVerifyMode(QSslSocket::VerifyNone);
@@ -1358,18 +1379,15 @@ void ModelList::updateModelsFromJson()
qWarning() << "WARNING: Could not download models.json synchronously";
updateModelsFromJsonAsync();

-QSettings settings;
-QFileInfo info(settings.fileName());
-QString dirPath = info.canonicalPath();
-const QString modelsConfig = dirPath + "/models.json";
-QFile file(modelsConfig);
-if (!file.open(QIODeviceBase::ReadOnly)) {
-qWarning() << "ERROR: Couldn't read models config file: " << modelsConfig;
-} else {
-QByteArray jsonData = file.readAll();
-file.close();
+auto cacheFile = modelsJsonCacheFile();
+if (!cacheFile) {
+// no known location
+} else if (cacheFile->open(QIODeviceBase::ReadOnly)) {
+QByteArray jsonData = cacheFile->readAll();
+cacheFile->close();
parseModelsJsonFile(jsonData, false);
-}
+} else if (cacheFile->exists())
+qWarning() << "ERROR: Couldn't read models.json cache file: " << cacheFile->fileName();
}
delete jsonReply;
}
@@ -1378,12 +1396,14 @@ void ModelList::updateModelsFromJsonAsync()
{
m_asyncModelRequestOngoing = true;
emit asyncModelRequestOngoingChanged();
+QString modelsJsonFname = modelsJsonFilename();

#if defined(USE_LOCAL_MODELSJSON)
-QUrl jsonUrl("file://" + QDir::homePath() + u"/dev/large_language_models/gpt4all/gpt4all-chat/metadata/models%1.json"_s.arg(MODELS_VERSION));
+QUrl jsonUrl(u"file://%1/dev/large_language_models/gpt4all/gpt4all-chat/metadata/%2"_s.arg(QDir::homePath(), modelsJsonFname));
#else
-QUrl jsonUrl(u"http://gpt4all.io/models/models%1.json"_s.arg(MODELS_VERSION));
+QUrl jsonUrl(u"http://gpt4all.io/models/%1"_s.arg(modelsJsonFname));
#endif

QNetworkRequest request(jsonUrl);
QSslConfiguration conf = request.sslConfiguration();
conf.setPeerVerifyMode(QSslSocket::VerifyNone);
@@ -1446,17 +1466,14 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
}

if (save) {
-QSettings settings;
-QFileInfo info(settings.fileName());
-QString dirPath = info.canonicalPath();
-const QString modelsConfig = dirPath + "/models.json";
-QFile file(modelsConfig);
-if (!file.open(QIODeviceBase::WriteOnly)) {
-qWarning() << "ERROR: Couldn't write models config file: " << modelsConfig;
-} else {
-file.write(jsonData);
-file.close();
-}
+auto cacheFile = modelsJsonCacheFile();
+if (!cacheFile) {
+// no known location
+} else if (QFileInfo(*cacheFile).dir().mkpath(u"."_s) && cacheFile->open(QIODeviceBase::WriteOnly)) {
+cacheFile->write(jsonData);
+cacheFile->close();
+} else
+qWarning() << "ERROR: Couldn't write models config file: " << cacheFile->fileName();
}

QJsonArray jsonArray = document.array();
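
One practical note: nothing in this diff migrates or removes the old cache file, so after upgrading, a stale models.json may remain next to the settings ini file while new downloads land in the cache directory as models3.json. The sketch below, again with placeholder organization/application names and forcing INI-format settings so that QSettings::fileName() is a real file path, prints both candidate locations, which can help track down a leftover copy.

```cpp
// Sketch only: prints the pre-patch and post-patch cache paths so a stale
// models.json left in the settings directory can be found and removed.
// Organization/application names below are placeholders.
#include <QCoreApplication>
#include <QDebug>
#include <QFileInfo>
#include <QSettings>
#include <QStandardPaths>

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);
    QCoreApplication::setOrganizationName(QStringLiteral("ExampleOrg"));
    QCoreApplication::setApplicationName(QStringLiteral("ExampleApp"));
    // Force file-backed settings so fileName() returns a filesystem path on all platforms.
    QSettings::setDefaultFormat(QSettings::IniFormat);

    QSettings settings;
    // Pre-patch location: next to the settings ini file, always named models.json.
    qDebug() << "old cache file:"
             << QFileInfo(settings.fileName()).absolutePath() + QStringLiteral("/models.json");

    // Post-patch location: per-user cache directory, versioned filename.
    qDebug() << "new cache file:"
             << QStandardPaths::writableLocation(QStandardPaths::CacheLocation)
                    + QStringLiteral("/models3.json");
    return 0;
}
```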
