LlamaLib v2.0.2
Cross-platform library for local LLMs
This is the complete list of members for LLMClient, including all inherited members.
| Member | Defined in | Attributes |
| --- | --- | --- |
| apply_template(const json &messages) | LLM | virtual |
| apply_template_json(const json &data) override | LLMClient | virtual |
| build_apply_template_json(const json &messages) | LLM | protectedvirtual |
| build_completion_json(const std::string &prompt, int id_slot=-1) | LLM | protectedvirtual |
| build_detokenize_json(const std::vector< int32_t > &tokens) | LLM | protectedvirtual |
| build_embeddings_json(const std::string &query) | LLM | protectedvirtual |
| build_slot_json(int id_slot, const std::string &action, const std::string &filepath) | LLMLocal | protectedvirtual |
| build_tokenize_json(const std::string &query) | LLM | protectedvirtual |
| cancel(int id_slot) override | LLMClient | virtual |
| completion(const std::string &prompt, CharArrayFn callback=nullptr, int id_slot=-1, bool return_response_json=false) | LLM | virtual |
| completion_json(const json &data, CharArrayFn callback=nullptr, bool callbackWithJSON=true) override | LLMClient | virtual |
| completion_params | LLM | |
| detokenize(const std::vector< int32_t > &tokens) | LLM | virtual |
| detokenize_json(const json &data) override | LLMClient | virtual |
| embeddings(const std::string &query) | LLM | virtual |
| embeddings_json(const json &data) override | LLMClient | virtual |
| get_completion_params() | LLM | inlinevirtual |
| get_grammar() | LLM | inlinevirtual |
| get_next_available_slot() override | LLMClient | virtual |
| grammar | LLM | |
| has_gpu_layers(const std::string &command) | LLM | static |
| is_remote() const | LLMClient | inline |
| is_server_alive() (defined in LLMClient) | LLMClient | |
| LLM_args_to_command(const std::string &model_path, int num_slots=1, int num_threads=-1, int num_GPU_layers=0, bool flash_attention=false, int context_size=4096, int batch_size=2048, bool embedding_only=false, const std::vector< std::string > &lora_paths={}) | LLM | static |
| LLMClient(LLMProvider *llm) | LLMClient | |
| LLMClient(const std::string &url, const int port, const std::string &API_key="", const int max_retries=5) | LLMClient | |
| load_slot(int id_slot, const std::string &filepath) | LLMLocal | inlinevirtual |
| n_keep | LLM | |
| parse_apply_template_json(const json &result) | LLM | protectedvirtual |
| parse_completion_json(const json &result) | LLM | protectedvirtual |
| parse_detokenize_json(const json &result) | LLM | protectedvirtual |
| parse_embeddings_json(const json &result) | LLM | protectedvirtual |
| parse_slot_json(const json &result) | LLMLocal | protectedvirtual |
| parse_tokenize_json(const json &result) | LLM | protectedvirtual |
| save_slot(int id_slot, const std::string &filepath) | LLMLocal | inlinevirtual |
| set_completion_params(json completion_params_) | LLM | inlinevirtual |
| set_grammar(std::string grammar_) | LLM | inlinevirtual |
| set_SSL(const char *SSL_cert) | LLMClient | |
| slot(int id_slot, const std::string &action, const std::string &filepath) | LLMLocal | protectedvirtual |
| slot_json(const json &data) override | LLMClient | virtual |
| tokenize(const std::string &query) | LLM | virtual |
| tokenize_json(const json &data) override | LLMClient | virtual |
| ~LLM()=default | LLM | virtual |
| ~LLMClient() | LLMClient | |
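Taken together, these members describe a client that talks to a running LlamaLib server: the second constructor connects by URL and port, and the base-class operations (tokenize, embeddings, completion, apply_template) are routed through the overridden *_json request methods. The sketch below shows one plausible way the class could be used. Only the member signatures come from the list above; the header name, the server address, and the return types (bool for is_server_alive, std::vector<int32_t> for tokenize, std::string for completion) are assumptions and may differ from the actual API.

```cpp
// Minimal sketch, not a verified LlamaLib example.
// Signatures are taken from the member list above; everything marked
// "assumed" is an illustration, not documented behavior.
#include <iostream>
#include <string>
#include <vector>

#include "LLMClient.h"  // assumed header name; adjust to the actual LlamaLib include

int main() {
    // Connect to a LlamaLib server; URL and port here are placeholders.
    // Constructor: LLMClient(const std::string &url, const int port,
    //                        const std::string &API_key = "", const int max_retries = 5)
    LLMClient client("localhost", 13333);

    if (!client.is_server_alive()) {  // bool return type assumed
        std::cerr << "Server not reachable" << std::endl;
        return 1;
    }

    // Tokenize a prompt; the std::vector<int32_t> return type is inferred from
    // detokenize(const std::vector<int32_t> &tokens) and is an assumption.
    std::vector<int32_t> tokens = client.tokenize("Hello, world!");
    std::cout << tokens.size() << " tokens" << std::endl;

    // Blocking completion with the default slot selection (id_slot = -1);
    // std::string return type assumed.
    std::string reply = client.completion("Write a haiku about llamas.");
    std::cout << reply << std::endl;
    return 0;
}
```

Judging from the completion signature, passing a CharArrayFn callback instead of nullptr presumably delivers output incrementally as it is generated, while set_grammar and set_completion_params (inherited from LLM) would constrain and parameterize generation before the call.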