LlamaLib  v2.0.2
Cross-platform library for local LLMs
LLMClient Member List

This is the complete list of members for LLMClient, including all inherited members.

apply_template(const json &messages)LLMvirtual
apply_template_json(const json &data) overrideLLMClientvirtual
build_apply_template_json(const json &messages)LLMprotectedvirtual
build_completion_json(const std::string &prompt, int id_slot=-1)LLMprotectedvirtual
build_detokenize_json(const std::vector< int32_t > &tokens)LLMprotectedvirtual
build_embeddings_json(const std::string &query)LLMprotectedvirtual
build_slot_json(int id_slot, const std::string &action, const std::string &filepath)LLMLocalprotectedvirtual
build_tokenize_json(const std::string &query)LLMprotectedvirtual
cancel(int id_slot) overrideLLMClientvirtual
completion(const std::string &prompt, CharArrayFn callback=nullptr, int id_slot=-1, bool return_response_json=false)LLMvirtual
completion_json(const json &data, CharArrayFn callback=nullptr, bool callbackWithJSON=true) overrideLLMClientvirtual
completion_paramsLLM
detokenize(const std::vector< int32_t > &tokens)LLMvirtual
detokenize_json(const json &data) overrideLLMClientvirtual
embeddings(const std::string &query)LLMvirtual
embeddings_json(const json &data) overrideLLMClientvirtual
get_completion_params()LLMinlinevirtual
get_grammar()LLMinlinevirtual
get_next_available_slot() overrideLLMClientvirtual
grammarLLM
has_gpu_layers(const std::string &command)LLMstatic
is_remote() constLLMClientinline
is_server_alive() (defined in LLMClient)LLMClient
LLM_args_to_command(const std::string &model_path, int num_slots=1, int num_threads=-1, int num_GPU_layers=0, bool flash_attention=false, int context_size=4096, int batch_size=2048, bool embedding_only=false, const std::vector< std::string > &lora_paths={})LLMstatic
LLMClient(LLMProvider *llm)LLMClient
LLMClient(const std::string &url, const int port, const std::string &API_key="", const int max_retries=5)LLMClient
load_slot(int id_slot, const std::string &filepath)LLMLocalinlinevirtual
n_keepLLM
parse_apply_template_json(const json &result)LLMprotectedvirtual
parse_completion_json(const json &result)LLMprotectedvirtual
parse_detokenize_json(const json &result)LLMprotectedvirtual
parse_embeddings_json(const json &result)LLMprotectedvirtual
parse_slot_json(const json &result)LLMLocalprotectedvirtual
parse_tokenize_json(const json &result)LLMprotectedvirtual
save_slot(int id_slot, const std::string &filepath)LLMLocalinlinevirtual
set_completion_params(json completion_params_)LLMinlinevirtual
set_grammar(std::string grammar_)LLMinlinevirtual
set_SSL(const char *SSL_cert)LLMClient
slot(int id_slot, const std::string &action, const std::string &filepath)LLMLocalprotectedvirtual
slot_json(const json &data) overrideLLMClientvirtual
tokenize(const std::string &query)LLMvirtual
tokenize_json(const json &data) overrideLLMClientvirtual
~LLM()=defaultLLMvirtual
~LLMClient()LLMClient