sync : llama.cpp #1113

Merged on Feb 12, 2025, with 28 commits.

Commits:

- 0752eaf metal : use residency set for other platforms (llama/11648) [jhen0409, Feb 4, 2025]
- 4f37b52 HIP: force max threads per block to be 1024 (llama/11621) [fxzjshm, Feb 4, 2025]
- 387edc9 CUDA: non-contiguous (RMS) norm support (llama/11659) [JohannesGaessler, Feb 4, 2025]
- a50d222 CUDA: support for mat. mul. with ne03 != ne13 (llama/11656) [JohannesGaessler, Feb 5, 2025]
- e841c6f metal : adjust support conditions for norm operators (llama/11671) [ggerganov, Feb 5, 2025]
- 85d428d metal : avoid breaking build when metal API predates TARGET_OS_VISION… [charles-dyfis-net, Feb 6, 2025]
- 6cd96d9 vulkan: use smaller combined allocations to avoid fragmentation (llam… [jeffbolznv, Feb 6, 2025]
- 8444353 vulkan: initial support for IQ4_XS quantization (llama/11501) [remyoudompheng, Feb 6, 2025]
- b23fc86 vulkan: optimize coopmat2 iq2/iq3 callbacks (llama/11521) [jeffbolznv, Feb 6, 2025]
- 12a4046 ggml : fix LoongArch compile error with 128-bit SIMD (llama/11701) [junchao-loongson, Feb 6, 2025]
- bd19b23 SYCL: Adjust support condition for norm operators (llama/11674) [qnixsynapse, Feb 6, 2025]
- 2bd5ccb ggml : optimize and build warning fix for LoongArch (llama/11709) [MQ-mengqing, Feb 7, 2025]
- 98b3824 SYCL: remove XMX info from print devices (llama/11712) [qnixsynapse, Feb 7, 2025]
- e9961a7 vulkan: print shared memory size (llama/11719) [jeffbolznv, Feb 7, 2025]
- fdf1349 CUDA: fix min. version for movmatrix (llama/11751) [JohannesGaessler, Feb 8, 2025]
- 75fc2c3 ggml: Fix data race in ggml threadpool (llama/11736) [kkontny, Feb 8, 2025]
- 1b3c1d2 vulkan: account for lookup tables when checking shared memory size (l… [jeffbolznv, Feb 9, 2025]
- e9afd20 vulkan: add environment variable GGML_VK_PREFER_HOST_MEMORY to avoid … [wbruna, Feb 10, 2025]
- 453aaee vulkan: Make Vulkan optional at runtime (#11493). (llama/11494) [daym, Feb 10, 2025]
- 04b22f0 fix: typos in documentation files (llama/11791) [maximevtush, Feb 10, 2025]
- 7f8cf75 CUDA: use arch list for compatibility check (llama/11775) [JohannesGaessler, Feb 10, 2025]
- 4b00e83 Fix #11802: Compile bug - RegQueryValueExA changed to RegQueryValueEx… [sheldonrobinson, Feb 11, 2025]
- 501b77b CUDA: fix CUDART_VERSION checks (llama/11821) [JohannesGaessler, Feb 12, 2025]
- 0b897da ggml-cpu: Fix duplicate MATMUL_INT8 (llama/11817) [ownia, Feb 12, 2025]
- b669f7a ggml : fix multi-threaded clamp_f32 (llama/11824) [Burton2000, Feb 12, 2025]
- d893024 cleanup: fix compile warnings associated with gnu_printf (llama/11811) [bandoti, Feb 12, 2025]
- cb120dc HIP: Switch to std::vector in rocblas version check (llama/11820) [IMbackK, Feb 12, 2025]
- 93ceeb8 sync : llama.cpp [ggerganov, Feb 12, 2025]

Files changed:

include/ggml-vulkan.h (2 changes: 0 additions & 2 deletions)

```diff
@@ -10,8 +10,6 @@ extern "C" {
 #define GGML_VK_NAME "Vulkan"
 #define GGML_VK_MAX_DEVICES 16
 
-GGML_BACKEND_API void ggml_vk_instance_init(void);
-
 // backend API
 GGML_BACKEND_API ggml_backend_t ggml_backend_vk_init(size_t dev_num);
```
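
With ggml_vk_instance_init gone from the public header, the Vulkan instance is presumably set up internally and callers only go through the remaining entry points. A minimal sketch of backend setup and teardown after this change, assuming device index 0 and the standard ggml-backend release call:

```c
#include "ggml-backend.h"
#include "ggml-vulkan.h"

#include <stdio.h>

int main(void) {
    // no explicit ggml_vk_instance_init() call anymore; the instance is
    // created internally when a backend for a given device is requested
    ggml_backend_t backend = ggml_backend_vk_init(0 /* dev_num */);
    if (backend == NULL) {
        fprintf(stderr, "failed to initialize Vulkan backend\n");
        return 1;
    }

    // ... allocate buffers and run graphs with the backend ...

    ggml_backend_free(backend);
    return 0;
}
```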

include/ggml.h (2 changes: 1 addition & 1 deletion)

```diff
@@ -198,7 +198,7 @@
 
 #ifndef __GNUC__
 #    define GGML_ATTRIBUTE_FORMAT(...)
-#elif defined(__MINGW32__)
+#elif defined(__MINGW32__) && !defined(__clang__)
 #    define GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
 #else
 #    define GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
```
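
For context, GGML_ATTRIBUTE_FORMAT tags a variadic function as printf-like so GCC-compatible compilers can type-check its format arguments; the added !defined(__clang__) clause makes clang on MinGW fall back to the plain printf archetype, matching the gnu_printf warning cleanup in commit d893024 above. A small self-contained sketch of the pattern (log_msg is a hypothetical function, not part of ggml):

```c
#include <stdarg.h>
#include <stdio.h>

#ifndef __GNUC__
#    define GGML_ATTRIBUTE_FORMAT(...)
#elif defined(__MINGW32__) && !defined(__clang__)
#    define GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
#else
#    define GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
#endif

// argument 1 is the format string; argument checking starts at argument 2
GGML_ATTRIBUTE_FORMAT(1, 2)
static void log_msg(const char * fmt, ...) {
    va_list args;
    va_start(args, fmt);
    vfprintf(stderr, fmt, args);
    va_end(args);
}

int main(void) {
    log_msg("loaded %d tensors in %.2f s\n", 42, 0.5);
    // log_msg("%s", 42); // would be flagged by -Wformat at compile time
    return 0;
}
```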

scripts/sync-llama.last (2 changes: 1 addition & 1 deletion)

```diff
@@ -1 +1 @@
-21c84b5d2dc04050714567501bf78762bfa17846
+0fb77f821f6e70ad8b8247a97d1022f0fef78991
```

src/ggml-alloc.c (14 changes: 1 addition & 13 deletions)

```diff
@@ -989,19 +989,7 @@ ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_conte
             this_size = GGML_PAD(ggml_backend_buft_get_alloc_size(buft, t), alignment);
         }
 
-        if (this_size > max_size) {
-            GGML_LOG_ERROR("%s: tensor %s is too large to fit in a %s buffer (tensor size: %zu, max buffer size: %zu)\n",
-                    __func__, t->name,
-                    ggml_backend_buft_name(buft),
-                    this_size, max_size);
-            for (size_t i = 0; i < n_buffers; i++) {
-                ggml_backend_buffer_free(buffers[i]);
-            }
-            free(buffers);
-            return NULL;
-        }
-
-        if ((cur_buf_size + this_size) > max_size) {
+        if (cur_buf_size > 0 && (cur_buf_size + this_size) > max_size) {
            // allocate tensors in the current buffer
            if (!alloc_tensor_range(ctx, first, t, buft, cur_buf_size, &buffers, &n_buffers)) {
                return NULL;
```
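
The net effect: a tensor larger than max_size no longer aborts allocation up front. The cur_buf_size > 0 guard closes the current buffer before such a tensor, so the oversized tensor ends up in a buffer of its own, and a genuine allocation failure is reported by alloc_tensor_range instead. A stripped-down sketch of that greedy chunking policy (pack, MAX_SIZE, and the printed output are illustrative stand-ins, not ggml code):

```c
#include <stddef.h>
#include <stdio.h>

#define MAX_SIZE 256 // stand-in for the backend's max buffer size

// Greedily pack sizes into chunks of at most MAX_SIZE; an item larger
// than MAX_SIZE is emitted as its own chunk instead of being rejected,
// mirroring the new behavior (the backend may still fail to allocate it).
static void pack(const size_t * sizes, size_t n) {
    size_t cur = 0;
    for (size_t i = 0; i < n; i++) {
        // close the current chunk only if it is non-empty and would overflow
        if (cur > 0 && cur + sizes[i] > MAX_SIZE) {
            printf("chunk of %zu bytes\n", cur);
            cur = 0;
        }
        cur += sizes[i];
    }
    if (cur > 0) {
        printf("chunk of %zu bytes\n", cur);
    }
}

int main(void) {
    size_t sizes[] = {100, 100, 300, 50}; // 300 exceeds MAX_SIZE but still gets a chunk
    pack(sizes, 4);                       // prints chunks of 200, 300, and 50 bytes
    return 0;
}
```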

src/ggml-common.h (2 changes: 0 additions & 2 deletions)

```diff
@@ -473,7 +473,6 @@ GGML_TABLE_BEGIN(uint8_t, ksigns_iq2xs, 128)
     240, 113, 114, 243, 116, 245, 246, 119, 120, 249, 250, 123, 252, 125, 126, 255,
 GGML_TABLE_END()
 
-//#if __CUDA_ARCH__ >= GGML_CUDA_CC_DP4A // lowest compute capability for integer intrinsics
 GGML_TABLE_BEGIN(uint64_t, ksigns64, 128)
     0x0000000000000000, 0xff000000000000ff, 0xff0000000000ff00, 0x000000000000ffff,
     0xff00000000ff0000, 0x0000000000ff00ff, 0x0000000000ffff00, 0xff00000000ffffff,
@@ -508,7 +507,6 @@ GGML_TABLE_BEGIN(uint64_t, ksigns64, 128)
     0x00ffffffff000000, 0xffffffffff0000ff, 0xffffffffff00ff00, 0x00ffffffff00ffff,
     0xffffffffffff0000, 0x00ffffffffff00ff, 0x00ffffffffffff00, 0xffffffffffffffff,
 GGML_TABLE_END()
-//#endif
 
 
 GGML_TABLE_BEGIN(uint64_t, iq2xxs_grid, 256)
```
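
The surrounding ksigns64 table packs eight 0x00/0xff byte masks into each 64-bit entry. One plausible reading of how such masks are consumed (a hedged sketch, not lifted from ggml's kernels) is branchless per-byte sign application via the identity (v ^ m) - m, which leaves a byte unchanged for m == 0x00 and negates it for m == 0xff:

```c
#include <stdint.h>
#include <stdio.h>

// apply a 0x00/0xff per-byte sign mask to 8 packed int8 values:
// bytes under a 0xff mask byte are negated, others pass through
static void apply_signs(int8_t * vals, uint64_t mask) {
    for (int i = 0; i < 8; i++) {
        int8_t m = (int8_t)((mask >> (8 * i)) & 0xff);
        // (v ^ m) - m == v when m == 0x00, == -v when m == 0xff (i.e. -1)
        vals[i] = (int8_t)((vals[i] ^ m) - m);
    }
}

int main(void) {
    int8_t v[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    apply_signs(v, 0xff000000000000ffULL); // second table entry: negate bytes 0 and 7
    for (int i = 0; i < 8; i++) {
        printf("%d ", v[i]);               // expected: -1 2 3 4 5 6 7 -8
    }
    printf("\n");
    return 0;
}
```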

src/ggml-cpu/ggml-cpu-impl.h (18 changes: 6 additions & 12 deletions)

```diff
@@ -360,21 +360,15 @@ inline static int32x4_t ggml_vdotq_s32(int32x4_t acc, int8x16_t a, int8x16_t b)
 #endif
 
 #if defined(__loongarch_asx)
-
-typedef union {
-    int32_t i;
-    float f;
-} ft_union;
-
 /* float type data load instructions */
-static __m128 __lsx_vreplfr2vr_s(float val) {
-    ft_union fi_tmpval = {.f = val};
-    return (__m128)__lsx_vreplgr2vr_w(fi_tmpval.i);
+static __m128 __lsx_vreplfr2vr_s(const float val) {
+    v4f32 res = {val, val, val, val};
+    return (__m128)res;
 }
 
-static __m256 __lasx_xvreplfr2vr_s(float val) {
-    ft_union fi_tmpval = {.f = val};
-    return (__m256)__lasx_xvreplgr2vr_w(fi_tmpval.i);
+static __m256 __lasx_xvreplfr2vr_s(const float val) {
+    v8f32 res = {val, val, val, val, val, val, val, val};
+    return (__m256)res;
 }
 #endif
 
```
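
The rewrite drops the ft_union type-punning detour and builds the splat with a native vector literal, which the compiler can lower directly to a replicate instruction. The same idea, sketched with GCC/Clang generic vector extensions so it compiles outside LoongArch (the local v4f32 typedef stands in for the one the LoongArch headers provide):

```c
#include <stdio.h>

// generic 128-bit float vector, analogous to LoongArch's v4f32
typedef float v4f32 __attribute__((vector_size(16)));

// broadcast a scalar into all four lanes via a vector literal,
// avoiding the old read-float-as-int union trick
static v4f32 splat_f32(const float val) {
    v4f32 res = {val, val, val, val};
    return res;
}

int main(void) {
    v4f32 v = splat_f32(1.5f);
    for (int i = 0; i < 4; i++) {
        printf("lane %d: %f\n", i, v[i]); // each lane holds 1.5
    }
    return 0;
}
```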