LlamaLib v2.0.2
Cross-platform library for local LLMs
LLMProvider Member List

This is the complete list of members for LLMProvider, including all inherited members.

Member | Defined in | Attributes
apply_template(const json &messages) | LLM | virtual
apply_template_json(const json &data)=0 | LLM | pure virtual
build_apply_template_json(const json &messages) | LLM | protected virtual
build_completion_json(const std::string &prompt, int id_slot=-1) | LLM | protected virtual
build_detokenize_json(const std::vector< int32_t > &tokens) | LLM | protected virtual
build_embeddings_json(const std::string &query) | LLM | protected virtual
build_lora_list_json(const std::vector< LoraIdScalePath > &loras) | LLMProvider | protected virtual
build_lora_weight_json(const std::vector< LoraIdScale > &loras) | LLMProvider | protected virtual
build_slot_json(int id_slot, const std::string &action, const std::string &filepath) | LLMLocal | protected virtual
build_tokenize_json(const std::string &query) | LLM | protected virtual
cancel(int id_slot)=0 | LLMLocal | pure virtual
completion(const std::string &prompt, CharArrayFn callback=nullptr, int id_slot=-1, bool return_response_json=false) | LLM | virtual
completion_json(const json &data, CharArrayFn callback, bool callbackWithJSON)=0 | LLM | pure virtual
completion_params | LLM
debug(int debug_level)=0 | LLMProvider | pure virtual
debug_implementation()=0 | LLMProvider | pure virtual
detokenize(const std::vector< int32_t > &tokens) | LLM | virtual
detokenize_json(const json &data)=0 | LLM | pure virtual
embedding_size()=0 | LLMProvider | pure virtual
embeddings(const std::string &query) | LLM | virtual
embeddings_json(const json &data)=0 | LLM | pure virtual
enable_reasoning(bool reasoning) | LLMProvider | inline virtual
get_completion_params() | LLM | inline virtual
get_grammar() | LLM | inline virtual
get_next_available_slot()=0 | LLMLocal | pure virtual
grammar | LLM
has_gpu_layers(const std::string &command) | LLM | static
join_server()=0 | LLMProvider | pure virtual
join_service()=0 | LLMProvider | pure virtual
LLM_args_to_command(const std::string &model_path, int num_slots=1, int num_threads=-1, int num_GPU_layers=0, bool flash_attention=false, int context_size=4096, int batch_size=2048, bool embedding_only=false, const std::vector< std::string > &lora_paths={}) | LLM | static
load_slot(int id_slot, const std::string &filepath) | LLMLocal | inline virtual
logging_callback(CharArrayFn callback)=0 | LLMProvider | pure virtual
logging_stop() | LLMProvider | virtual
lora_list() | LLMProvider | virtual
lora_list_json()=0 | LLMProvider | pure virtual
lora_weight(const std::vector< LoraIdScale > &loras) | LLMProvider | virtual
lora_weight_json(const json &data)=0 | LLMProvider | pure virtual
n_keep | LLM
parse_apply_template_json(const json &result) | LLM | protected virtual
parse_completion_json(const json &result) | LLM | protected virtual
parse_detokenize_json(const json &result) | LLM | protected virtual
parse_embeddings_json(const json &result) | LLM | protected virtual
parse_lora_list_json(const json &result) | LLMProvider | protected virtual
parse_lora_weight_json(const json &result) | LLMProvider | protected virtual
parse_slot_json(const json &result) | LLMLocal | protected virtual
parse_tokenize_json(const json &result) | LLM | protected virtual
reasoning_enabled | LLMProvider | protected
save_slot(int id_slot, const std::string &filepath) | LLMLocal | inline virtual
set_completion_params(json completion_params_) | LLM | inline virtual
set_grammar(std::string grammar_) | LLM | inline virtual
set_SSL(const std::string &SSL_cert, const std::string &SSL_key)=0 | LLMProvider | pure virtual
slot(int id_slot, const std::string &action, const std::string &filepath) | LLMLocal | protected virtual
slot_json(const json &data)=0 | LLMLocal | pure virtual
start()=0 | LLMProvider | pure virtual
start_server(const std::string &host="0.0.0.0", int port=-1, const std::string &API_key="")=0 | LLMProvider | pure virtual
started()=0 | LLMProvider | pure virtual
stop()=0 | LLMProvider | pure virtual
stop_server()=0 | LLMProvider | pure virtual
tokenize(const std::string &query) | LLM | virtual
tokenize_json(const json &data)=0 | LLM | pure virtual
~LLM()=default | LLM | virtual
~LLMProvider() | LLMProvider | virtual
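
A minimal usage sketch follows, assuming a concrete LLMProvider implementation (the class is abstract, since several members above are pure virtual). The header name, the CharArrayFn callback shape, and the return types of completion(), tokenize(), detokenize() and started() are assumptions inferred from the signatures listed above, not taken from the library documentation.

```cpp
// Sketch only: "LLMProvider.h" is a hypothetical header name, and the
// return types / CharArrayFn shape below are assumptions, not documented facts.
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

#include "LLMProvider.h"  // hypothetical header exposing LLMProvider

// Assumed CharArrayFn shape: a plain function receiving streamed text chunks.
static void print_chunk(const char* chunk) {
    std::cout << chunk << std::flush;
}

// The caller supplies a concrete provider (e.g. a local llama.cpp-backed one).
void run_demo(LLMProvider& llm) {
    llm.start();                                   // start the backend
    if (!llm.started()) return;                    // assumed truthy once running

    // Stream a completion; id_slot = -1 lets the provider pick a slot.
    std::string reply = llm.completion("Hello!", print_chunk, /*id_slot=*/-1);

    // Tokenize / detokenize round trip.
    std::vector<int32_t> tokens = llm.tokenize(reply);
    std::string text = llm.detokenize(tokens);

    // Optionally expose the provider over HTTP, then shut everything down.
    llm.start_server("0.0.0.0", 8080);
    llm.stop_server();
    llm.stop();
}
```

The split visible in the member list, public wrappers such as completion() and tokenize() alongside pure virtual *_json() methods and protected build_*/parse_* helpers, suggests that concrete backends implement only the JSON-level primitives while the base classes handle request construction and response parsing; treat that reading as an inference from the names, not a documented guarantee.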