forked from kherud/java-llama.cpp
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathCMakeLists.txt
More file actions
134 lines (114 loc) · 4.34 KB
/
CMakeLists.txt
File metadata and controls
134 lines (114 loc) · 4.34 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
cmake_minimum_required(VERSION 3.14)
project(jllama CXX)

include(FetchContent)

# Build the dependencies (llama.cpp, ggml) as static libraries that get folded
# into the single shared JNI library; position-independent code is therefore
# required for all of them. The original file set BUILD_SHARED_LIBS to ON and
# immediately overwrote it with OFF — the dead ON assignment has been removed.
set(BUILD_SHARED_LIBS OFF)
set(CMAKE_POSITION_INDEPENDENT_CODE ON)

option(LLAMA_VERBOSE "llama: verbose output" OFF)
#################### json ####################
# nlohmann/json is header-only; pinned to a release tag for reproducible builds.
FetchContent_Declare(
json
GIT_REPOSITORY https://github.com/nlohmann/json
GIT_TAG v3.11.3
)
FetchContent_MakeAvailable(json)
#################### llama.cpp ####################
# LLAMA_BUILD_COMMON must be ON because jllama links against llama.cpp's
# "common" helper library in addition to the core "llama" target.
set(LLAMA_BUILD_COMMON ON)
FetchContent_Declare(
llama.cpp
GIT_REPOSITORY https://github.com/ggerganov/llama.cpp.git
# Pinned to a specific upstream release tag for reproducible builds.
GIT_TAG b4916
)
FetchContent_MakeAvailable(llama.cpp)
#################### jllama ####################

# Query the project's Java helper class de.kherud.llama.OSInfo for a host
# property. This deduplicates the previously copy-pasted OS/arch probes.
# Arguments:
#   flag    -- command-line flag understood by OSInfo (--os or --arch)
#   out_var -- name of the variable that receives the stripped output
# Requires `mvn compile` to have been run first so target/classes exists.
function(jllama_query_os_info flag out_var)
    find_package(Java REQUIRED)
    find_program(JAVA_EXECUTABLE NAMES java)
    execute_process(
        COMMAND ${JAVA_EXECUTABLE} -cp ${CMAKE_SOURCE_DIR}/target/classes de.kherud.llama.OSInfo ${flag}
        OUTPUT_VARIABLE result
        OUTPUT_STRIP_TRAILING_WHITESPACE
    )
    # Functions have their own scope; propagate the result to the caller.
    set(${out_var} ${result} PARENT_SCOPE)
endfunction()

# find which OS we build for if not set (make sure to run mvn compile first)
if(NOT DEFINED OS_NAME)
    if(ANDROID_ABI)
        set(OS_NAME "Android")
    else()
        jllama_query_os_info(--os OS_NAME)
    endif()
endif()
if(NOT OS_NAME)
    message(FATAL_ERROR "Could not determine OS name")
endif()

# find which architecture we build for if not set (make sure to run mvn compile first)
if(NOT DEFINED OS_ARCH)
    if(ANDROID_ABI)
        # On Android the ABI string (e.g. arm64-v8a) doubles as the arch name.
        set(OS_ARCH ${ANDROID_ABI})
    else()
        jllama_query_os_info(--arch OS_ARCH)
    endif()
endif()
if(NOT OS_ARCH)
    message(FATAL_ERROR "Could not determine CPU architecture")
endif()
# The native libraries are installed straight into the Maven resource tree so
# they end up bundled inside the jar; CUDA builds use a dedicated resource
# directory so CPU and GPU artifacts can coexist.
if(GGML_CUDA)
    set(_jllama_resource_root resources_linux_cuda)
    set(_jllama_build_kind "GPU (CUDA Linux) build")
else()
    set(_jllama_resource_root resources)
    set(_jllama_build_kind "CPU build")
endif()
set(JLLAMA_DIR ${CMAKE_SOURCE_DIR}/src/main/${_jllama_resource_root}/de/kherud/llama/${OS_NAME}/${OS_ARCH})
message(STATUS "${_jllama_build_kind} - Installing files to ${JLLAMA_DIR}")
# include jni.h and jni_md.h
if(NOT DEFINED JNI_INCLUDE_DIRS)
    if(OS_NAME MATCHES "^Linux" OR OS_NAME STREQUAL "Mac" OR OS_NAME STREQUAL "Darwin")
        # Pre-bundled CI headers for unix-like platforms.
        set(JNI_INCLUDE_DIRS .github/include/unix)
    elseif(OS_NAME STREQUAL "Windows")
        set(JNI_INCLUDE_DIRS .github/include/windows)
    else()
        # if we don't have provided headers, try to find them via Java
        find_package(Java REQUIRED)
        find_program(JAVA_EXECUTABLE NAMES java)
        find_path(JNI_INCLUDE_DIRS NAMES jni.h HINTS ENV JAVA_HOME PATH_SUFFIXES include)
        # Find the platform-specific "jni_md.h" directory below jni.h.
        # BUG FIX: the original glob/append loop used JNI_INCLUDE_DIRS itself as
        # the base path while appending to it, so after the first append the
        # base expanded to the whole semicolon-joined list and produced broken
        # paths. Keep the base directory in its own variable instead.
        set(JNI_BASE_DIR "${JNI_INCLUDE_DIRS}")
        file(GLOB_RECURSE JNI_MD_PATHS RELATIVE "${JNI_BASE_DIR}" "${JNI_BASE_DIR}/**/jni_md.h")
        foreach(PATH IN LISTS JNI_MD_PATHS)
            get_filename_component(DIR ${PATH} DIRECTORY)
            list(APPEND JNI_INCLUDE_DIRS "${JNI_BASE_DIR}/${DIR}")
        endforeach()
    endif()
endif()
if(NOT JNI_INCLUDE_DIRS)
    if(ANDROID_ABI)
        # The Android NDK ships JNI headers; FindJNI populates JNI_INCLUDE_DIRS
        # itself, so the original self-assignment after it was a no-op and has
        # been removed.
        find_package(JNI REQUIRED)
    else()
        message(FATAL_ERROR "Could not determine JNI include directories")
    endif()
endif()
# The JNI shared library bridging Java and llama.cpp. The .hpp files are listed
# as sources only so generators/IDEs track them; only jllama.cpp is compiled.
add_library(jllama SHARED src/main/cpp/jllama.cpp src/main/cpp/server.hpp src/main/cpp/utils.hpp)
set_target_properties(jllama PROPERTIES POSITION_INDEPENDENT_CODE ON)
target_include_directories(jllama PRIVATE src/main/cpp ${JNI_INCLUDE_DIRS})
# Link through the namespaced alias nlohmann_json::nlohmann_json (exported by
# the fetched json target) so a typo fails at configure time instead of being
# passed silently to the linker as an unknown library name.
target_link_libraries(jllama PRIVATE common llama nlohmann_json::nlohmann_json)
target_compile_features(jllama PRIVATE cxx_std_11)
target_compile_definitions(jllama PRIVATE
    # $<BOOL:...> normalizes the option to 0/1 for the preprocessor.
    SERVER_VERBOSE=$<BOOL:${LLAMA_VERBOSE}>
)
# Emit the built libraries directly into the Maven resource directory so they
# are picked up when packaging the jar. On Windows DLLs are RUNTIME artifacts
# and every per-config property must be set explicitly, otherwise multi-config
# generators (Visual Studio) append a configuration subdirectory.
if(OS_NAME STREQUAL "Windows")
    set_target_properties(jllama llama ggml PROPERTIES
        RUNTIME_OUTPUT_DIRECTORY_DEBUG ${JLLAMA_DIR}
        RUNTIME_OUTPUT_DIRECTORY_RELEASE ${JLLAMA_DIR}
        RUNTIME_OUTPUT_DIRECTORY_RELWITHDEBINFO ${JLLAMA_DIR}
        # Was missing: MinSizeRel builds previously landed outside JLLAMA_DIR.
        RUNTIME_OUTPUT_DIRECTORY_MINSIZEREL ${JLLAMA_DIR}
    )
else()
    set_target_properties(jllama llama ggml PROPERTIES
        LIBRARY_OUTPUT_DIRECTORY ${JLLAMA_DIR}
    )
endif()
# When Metal is enabled without embedding the shader library, the .metal file
# must ship next to the native library so it can be loaded at runtime.
if (LLAMA_METAL AND NOT LLAMA_METAL_EMBED_LIBRARY)
# copy ggml-common.h and ggml-metal.metal to bin directory
# NOTE(review): this assumes ggml-metal.metal sits at the root of the fetched
# llama.cpp checkout; upstream has moved ggml sources into subdirectories over
# time — verify the path exists at tag b4916.
configure_file(${llama.cpp_SOURCE_DIR}/ggml-metal.metal ${JLLAMA_DIR}/ggml-metal.metal COPYONLY)
endif()