![]() |
LlamaLib
v2.0.2
Cross-platform library for local LLMs
|
C-compatible API functions. More...
| bool | Has_GPU_Layers (const char *command) |
| Check if command has GPU layers (C API) | |
| void | LLM_Debug (int debug_level) |
| Set global debug level (C API) | |
| void | LLM_Logging_Callback (CharArrayFn callback) |
| Set global logging callback (C API) | |
| void | LLM_Logging_Stop () |
| Stop global logging (C API) | |
| void | LLM_Set_Completion_Parameters (LLM *llm, const char *params_json="{}") |
| Set completion parameters (C API) | |
| const char * | LLM_Get_Completion_Parameters (LLM *llm) |
| Get completion parameters (C API) | |
| void | LLM_Set_Grammar (LLM *llm, const char *grammar="") |
| Set grammar (C API) | |
| const char * | LLM_Get_Grammar (LLM *llm) |
| Get grammar (C API) | |
| const char * | LLM_Apply_Template (LLM *llm, const char *messages_as_json) |
| Apply chat template (C API) | |
| const char * | LLM_Tokenize (LLM *llm, const char *query) |
| Tokenize text (C API) | |
| const char * | LLM_Detokenize (LLM *llm, const char *tokens_as_json) |
| Detokenize tokens (C API) | |
| const char * | LLM_Embeddings (LLM *llm, const char *query) |
| Generate embeddings (C API) | |
| const char * | LLM_Completion (LLM *llm, const char *prompt, CharArrayFn callback=nullptr, int id_slot=-1, bool return_response_json=false) |
| Generate completion (C API) | |
| const char * | LLM_Save_Slot (LLMLocal *llm, int id_slot, const char *filepath) |
| Save slot state (C API) | |
| const char * | LLM_Load_Slot (LLMLocal *llm, int id_slot, const char *filepath) |
| Load slot state (C API) | |
| void | LLM_Cancel (LLMLocal *llm, int id_slot) |
| Cancel request (C API) | |
| bool | LLM_Lora_Weight (LLMProvider *llm, const char *loras_as_json) |
| Configure LoRA weights (C API) | |
| void | LLM_Enable_Reasoning (LLMProvider *llm, bool enable_reasoning) |
| Enable reasoning (C API) | |
| const char * | LLM_Lora_List (LLMProvider *llm) |
| List LoRA adapters (C API) | |
| void | LLM_Delete (LLMProvider *llm) |
| Delete LLM provider (C API) | |
| void | LLM_Start (LLMProvider *llm) |
| Start LLM service (C API) | |
| const bool | LLM_Started (LLMProvider *llm) |
| Check if service is started (C API) | |
| void | LLM_Stop (LLMProvider *llm) |
| Stop LLM service (C API) | |
| void | LLM_Start_Server (LLMProvider *llm, const char *host="0.0.0.0", int port=-1, const char *API_key="") |
| Start HTTP server (C API) | |
| void | LLM_Stop_Server (LLMProvider *llm) |
| Stop HTTP server (C API) | |
| void | LLM_Join_Service (LLMProvider *llm) |
| Wait for service to complete (C API) | |
| void | LLM_Join_Server (LLMProvider *llm) |
| Wait for server to complete (C API) | |
| void | LLM_Set_SSL (LLMProvider *llm, const char *SSL_cert, const char *SSL_key) |
| Set SSL configuration (C API) | |
| const int | LLM_Status_Code () |
| Get last operation status code (C API) | |
| const char * | LLM_Status_Message () |
| Get last operation status message (C API) | |
| const int | LLM_Embedding_Size (LLMProvider *llm) |
| Get embedding vector size (C API) | |
| LLMAgent * | LLMAgent_Construct (LLMLocal *llm, const char *system_prompt="") |
| Construct LLMAgent (C API) | |
| void | LLMAgent_Set_System_Prompt (LLMAgent *llm, const char *system_prompt) |
| Set system prompt (C API) | |
| const char * | LLMAgent_Get_System_Prompt (LLMAgent *llm) |
| Get system prompt (C API) | |
| void | LLMAgent_Set_Slot (LLMAgent *llm, int slot_id) |
| Set processing slot (C API) | |
| int | LLMAgent_Get_Slot (LLMAgent *llm) |
| Get processing slot (C API) | |
| const char * | LLMAgent_Chat (LLMAgent *llm, const char *user_prompt, bool add_to_history=true, CharArrayFn callback=nullptr, bool return_response_json=false, bool debug_prompt=false) |
| Conduct chat interaction (C API) | |
| void | LLMAgent_Clear_History (LLMAgent *llm) |
| Clear conversation history (C API) | |
| const char * | LLMAgent_Get_History (LLMAgent *llm) |
| Get conversation history (C API) | |
| void | LLMAgent_Set_History (LLMAgent *llm, const char *history_json) |
| Set conversation history (C API) | |
| void | LLMAgent_Add_User_Message (LLMAgent *llm, const char *content) |
| Add user message to history (C API) | |
| void | LLMAgent_Add_Assistant_Message (LLMAgent *llm, const char *content) |
| Add assistant message to history (C API) | |
| void | LLMAgent_Remove_Last_Message (LLMAgent *llm) |
| Remove last message from history (C API) | |
| void | LLMAgent_Save_History (LLMAgent *llm, const char *filepath) |
| Save conversation history to file (C API) | |
| void | LLMAgent_Load_History (LLMAgent *llm, const char *filepath) |
| Load conversation history from file (C API) | |
| size_t | LLMAgent_Get_History_Size (LLMAgent *llm) |
| Get conversation history size (C API) | |
| bool | LLMClient_Is_Server_Alive (LLMClient *llm) |
| Check if server is alive (C API) | |
| void | LLMClient_Set_SSL (LLMClient *llm, const char *SSL_cert) |
| Set SSL certificate (C API) | |
| LLMClient * | LLMClient_Construct (LLMProvider *llm) |
| Construct local LLMClient (C API) | |
| LLMClient * | LLMClient_Construct_Remote (const char *url, const int port, const char *API_key="") |
| Construct remote LLMClient (C API) | |
| const char * | Available_Architectures (bool gpu) |
| Get available architectures (C API) | |
| void | LLMService_Registry (LLMProviderRegistry *existing_instance) |
| Set registry for LLMService (C API) | |
| LLMService * | LLMService_Construct (const char *model_path, int num_slots=1, int num_threads=-1, int num_GPU_layers=0, bool flash_attention=false, int context_size=4096, int batch_size=2048, bool embedding_only=false, int lora_count=0, const char **lora_paths=nullptr) |
| Construct LLMService instance (C API) | |
| LLMService * | LLMService_From_Command (const char *params_string) |
| Create LLMService from command string (C API) | |
| const char * | LLMService_Command (LLMService *llm_service) |
| Returns the construct command (C API) | |
| void | LLMService_InjectErrorState (ErrorState *error_state) |
C-compatible API functions.
| const char * Available_Architectures | ( | bool | gpu | ) |
Get available architectures (C API)
| gpu | Whether to include GPU architectures |
Definition at line 355 of file LLM_runtime.cpp.
| bool Has_GPU_Layers | ( | const char * | command | ) |
| const char * LLM_Apply_Template | ( | LLM * | llm, |
| const char * | messages_as_json ) |
| void LLM_Cancel | ( | LLMLocal * | llm, |
| int | id_slot ) |
| const char * LLM_Completion | ( | LLM * | llm, |
| const char * | prompt, | ||
| CharArrayFn | callback = nullptr, | ||
| int | id_slot = -1, | ||
| bool | return_response_json = false ) |
Generate completion (C API)
| llm | LLM instance pointer |
| prompt | Input prompt |
| callback | Optional streaming callback |
| id_slot | Slot ID (-1 for auto) |
| return_response_json | Whether to return JSON response |
Definition at line 460 of file LLM.cpp.
| void LLM_Debug | ( | int | debug_level | ) |
| void LLM_Delete | ( | LLMProvider * | llm | ) |
Delete LLM provider (C API)
| llm | LLMProvider instance pointer |
Definition at line 542 of file LLM.cpp.
| const char * LLM_Detokenize | ( | LLM * | llm, |
| const char * | tokens_as_json ) |
| const int LLM_Embedding_Size | ( | LLMProvider * | llm | ) |
Get embedding vector size (C API)
| llm | LLMProvider instance pointer |
Definition at line 601 of file LLM.cpp.
| const char * LLM_Embeddings | ( | LLM * | llm, |
| const char * | query ) |
| void LLM_Enable_Reasoning | ( | LLMProvider * | llm, |
| bool | enable_reasoning ) |
Enable reasoning (C API)
| llm | LLMProvider instance pointer |
| enable_reasoning | Whether to enable reasoning |
Definition at line 491 of file LLM.cpp.
| const char * LLM_Get_Completion_Parameters | ( | LLM * | llm | ) |
| const char * LLM_Get_Grammar | ( | LLM * | llm | ) |
| void LLM_Join_Server | ( | LLMProvider * | llm | ) |
Wait for server to complete (C API)
| llm | LLMProvider instance pointer |
Definition at line 565 of file LLM.cpp.
| void LLM_Join_Service | ( | LLMProvider * | llm | ) |
Wait for service to complete (C API)
| llm | LLMProvider instance pointer |
Definition at line 560 of file LLM.cpp.
| const char * LLM_Load_Slot | ( | LLMLocal * | llm, |
| int | id_slot, | ||
| const char * | filepath ) |
| void LLM_Logging_Callback | ( | CharArrayFn | callback | ) |
| void LLM_Logging_Stop | ( | ) |
| const char * LLM_Lora_List | ( | LLMProvider * | llm | ) |
List LoRA adapters (C API)
| llm | LLMProvider instance pointer |
Definition at line 530 of file LLM.cpp.
| bool LLM_Lora_Weight | ( | LLMProvider * | llm, |
| const char * | loras_as_json ) |
Configure LoRA weights (C API)
| llm | LLMProvider instance pointer |
| loras_as_json | JSON string with LoRA configuration |
Definition at line 511 of file LLM.cpp.
| const char * LLM_Save_Slot | ( | LLMLocal * | llm, |
| int | id_slot, | ||
| const char * | filepath ) |
| void LLM_Set_Completion_Parameters | ( | LLM * | llm, |
| const char * | params_json = "{}" ) |
| void LLM_Set_Grammar | ( | LLM * | llm, |
| const char * | grammar = "" ) |
| void LLM_Set_SSL | ( | LLMProvider * | llm, |
| const char * | SSL_cert, | ||
| const char * | SSL_key ) |
Set SSL configuration (C API)
| llm | LLMProvider instance pointer |
| SSL_cert | Path to certificate file |
| SSL_key | Path to private key file |
Definition at line 585 of file LLM.cpp.
| void LLM_Start | ( | LLMProvider * | llm | ) |
Start LLM service (C API)
| llm | LLMProvider instance pointer |
Definition at line 570 of file LLM.cpp.
| void LLM_Start_Server | ( | LLMProvider * | llm, |
| const char * | host = "0.0.0.0", | ||
| int | port = -1, | ||
| const char * | API_key = "" ) |
Start HTTP server (C API)
| llm | LLMProvider instance pointer |
| host | Host address (default: "0.0.0.0") |
| port | Port number (-1 for auto) |
| API_key | Optional API key |
Definition at line 550 of file LLM.cpp.
| const bool LLM_Started | ( | LLMProvider * | llm | ) |
Check if service is started (C API)
| llm | LLMProvider instance pointer |
Definition at line 575 of file LLM.cpp.
| const int LLM_Status_Code | ( | ) |
| const char * LLM_Status_Message | ( | ) |
| void LLM_Stop | ( | LLMProvider * | llm | ) |
Stop LLM service (C API)
| llm | LLMProvider instance pointer |
Definition at line 580 of file LLM.cpp.
| void LLM_Stop_Server | ( | LLMProvider * | llm | ) |
Stop HTTP server (C API)
| llm | LLMProvider instance pointer |
Definition at line 555 of file LLM.cpp.
| const char * LLM_Tokenize | ( | LLM * | llm, |
| const char * | query ) |
| void LLMAgent_Add_Assistant_Message | ( | LLMAgent * | llm, |
| const char * | content ) |
Add assistant message to history (C API)
| llm | LLMAgent instance pointer |
| content | Message content text |
Appends a new message to conversation history
Definition at line 213 of file LLM_agent.cpp.
| void LLMAgent_Add_User_Message | ( | LLMAgent * | llm, |
| const char * | content ) |
Add user message to history (C API)
| llm | LLMAgent instance pointer |
| content | Message content text |
Appends a new message to conversation history
Definition at line 208 of file LLM_agent.cpp.
| const char * LLMAgent_Chat | ( | LLMAgent * | llm, |
| const char * | user_prompt, | ||
| bool | add_to_history = true, | ||
| CharArrayFn | callback = nullptr, | ||
| bool | return_response_json = false, | ||
| bool | debug_prompt = false ) |
Conduct chat interaction (C API)
| llm | LLMAgent instance pointer |
| user_prompt | User input message |
| add_to_history | Whether to save messages to history (default: true) |
| callback | Optional streaming callback function |
| return_response_json | Whether to return JSON response (default: false) |
| debug_prompt | Whether to display the complete prompt (default: false) |
Main chat method for conversational interactions
Definition at line 155 of file LLM_agent.cpp.
| void LLMAgent_Clear_History | ( | LLMAgent * | llm | ) |
Clear conversation history (C API)
| llm | LLMAgent instance pointer |
Removes all messages from conversation history
Definition at line 161 of file LLM_agent.cpp.
Construct LLMAgent (C API)
| llm | LLMLocal instance to wrap |
| system_prompt | Initial system prompt (default: "") |
Creates a conversational agent with the specified configuration
Definition at line 149 of file LLM_agent.cpp.
| const char * LLMAgent_Get_History | ( | LLMAgent * | llm | ) |
Get conversation history (C API)
| llm | LLMAgent instance pointer |
Definition at line 177 of file LLM_agent.cpp.
| size_t LLMAgent_Get_History_Size | ( | LLMAgent * | llm | ) |
Get conversation history size (C API)
| llm | LLMAgent instance pointer |
Definition at line 241 of file LLM_agent.cpp.
| int LLMAgent_Get_Slot | ( | LLMAgent * | llm | ) |
Get processing slot (C API)
| llm | LLMAgent instance pointer |
Definition at line 187 of file LLM_agent.cpp.
| const char * LLMAgent_Get_System_Prompt | ( | LLMAgent * | llm | ) |
Get system prompt (C API)
| llm | LLMAgent instance pointer |
Definition at line 172 of file LLM_agent.cpp.
| void LLMAgent_Load_History | ( | LLMAgent * | llm, |
| const char * | filepath ) |
Load conversation history from file (C API)
| llm | LLMAgent instance pointer |
| filepath | Path to history file to load |
Loads conversation history from JSON file
Definition at line 232 of file LLM_agent.cpp.
| void LLMAgent_Remove_Last_Message | ( | LLMAgent * | llm | ) |
Remove last message from history (C API)
| llm | LLMAgent instance pointer |
Removes the most recently added message from history
Definition at line 218 of file LLM_agent.cpp.
| void LLMAgent_Save_History | ( | LLMAgent * | llm, |
| const char * | filepath ) |
Save conversation history to file (C API)
| llm | LLMAgent instance pointer |
| filepath | Path to save history file |
Saves conversation history as JSON to specified file
Definition at line 223 of file LLM_agent.cpp.
| void LLMAgent_Set_History | ( | LLMAgent * | llm, |
| const char * | history_json ) |
Set conversation history (C API)
| llm | LLMAgent instance pointer |
| history_json | JSON string containing conversation history |
Replaces current history with provided JSON data
Definition at line 192 of file LLM_agent.cpp.
| void LLMAgent_Set_Slot | ( | LLMAgent * | llm, |
| int | slot_id ) |
Set processing slot (C API)
| llm | LLMAgent instance pointer |
| slot_id | Slot ID to assign |
Definition at line 182 of file LLM_agent.cpp.
| void LLMAgent_Set_System_Prompt | ( | LLMAgent * | llm, |
| const char * | system_prompt ) |
Set system prompt (C API)
| llm | LLMAgent instance pointer |
| system_prompt | New system prompt string |
Setting system prompt clears conversation history
Definition at line 166 of file LLM_agent.cpp.
| LLMClient * LLMClient_Construct | ( | LLMProvider * | llm | ) |
Construct local LLMClient (C API)
| llm | LLMProvider instance to wrap |
Creates a client for local LLM provider access
Definition at line 394 of file LLM_client.cpp.
| LLMClient * LLMClient_Construct_Remote | ( | const char * | url, |
| const int | port, | ||
| const char * | API_key = "" ) |
Construct remote LLMClient (C API)
| url | Server URL or hostname |
| port | Server port number |
| API_key | Optional API key (default: "") |
Creates a client for remote LLM server access
Definition at line 399 of file LLM_client.cpp.
| bool LLMClient_Is_Server_Alive | ( | LLMClient * | llm | ) |
Check if server is alive (C API)
| llm | LLMClient instance pointer |
Definition at line 384 of file LLM_client.cpp.
| void LLMClient_Set_SSL | ( | LLMClient * | llm, |
| const char * | SSL_cert ) |
Set SSL certificate (C API)
| llm | LLMClient instance pointer |
| SSL_cert | Path to SSL certificate file |
Configure SSL certificate for remote client connections
Definition at line 389 of file LLM_client.cpp.
| const char * LLMService_Command | ( | LLMService * | llm_service | ) |
Returns the construct command (C API)
| llm_service | LLMService instance pointer |
Definition at line 751 of file LLM_service.cpp.
| LLMService * LLMService_Construct | ( | const char * | model_path, |
| int | num_slots = 1, | ||
| int | num_threads = -1, | ||
| int | num_GPU_layers = 0, | ||
| bool | flash_attention = false, | ||
| int | context_size = 4096, | ||
| int | batch_size = 2048, | ||
| bool | embedding_only = false, | ||
| int | lora_count = 0, | ||
| const char ** | lora_paths = nullptr ) |
Construct LLMService instance (C API)
| model_path | Path to model file |
| num_slots | Number of parallel sequences |
| num_threads | Number of CPU threads (-1 for auto) |
| num_GPU_layers | Number of GPU layers |
| flash_attention | Whether to use flash attention |
| context_size | Maximum context size |
| batch_size | Processing batch size |
| embedding_only | Whether to run in embedding-only mode |
| lora_count | Number of LoRA paths provided |
| lora_paths | Array of LoRA file paths |
Definition at line 710 of file LLM_service.cpp.
| LLMService * LLMService_From_Command | ( | const char * | params_string | ) |
Create LLMService from command string (C API)
| params_string | Command line parameter string |
See https://github.com/ggml-org/llama.cpp/tree/master/tools/server#usage for arguments.
Definition at line 729 of file LLM_service.cpp.
| void LLMService_InjectErrorState | ( | ErrorState * | error_state | ) |
Inject an error state (C API) — presumably for error propagation/testing; confirm against implementation
| error_state | ErrorState instance to inject |
Definition at line 756 of file LLM_service.cpp.
| void LLMService_Registry | ( | LLMProviderRegistry * | existing_instance | ) |
Set registry for LLMService (C API)
| existing_instance | Existing registry instance to use |
Allows injection of custom registry for LLMService instances
Definition at line 705 of file LLM_service.cpp.