id
int64
0
755k
file_name
stringlengths
3
109
file_path
stringlengths
13
185
content
stringlengths
31
9.38M
size
int64
31
9.38M
language
stringclasses
1 value
extension
stringclasses
11 values
total_lines
int64
1
340k
avg_line_length
float64
2.18
149k
max_line_length
int64
7
2.22M
alphanum_fraction
float64
0
1
repo_name
stringlengths
6
65
repo_stars
int64
100
47.3k
repo_forks
int64
0
12k
repo_open_issues
int64
0
3.4k
repo_license
stringclasses
9 values
repo_extraction_date
stringclasses
92 values
exact_duplicates_redpajama
bool
2 classes
near_duplicates_redpajama
bool
2 classes
exact_duplicates_githubcode
bool
2 classes
exact_duplicates_stackv2
bool
1 class
exact_duplicates_stackv1
bool
2 classes
near_duplicates_githubcode
bool
2 classes
near_duplicates_stackv1
bool
2 classes
near_duplicates_stackv2
bool
1 class
3,773
core_api_utils.cpp
typesense_typesense/src/core_api_utils.cpp
// ---------------------------------------------------------------------------
// NOTE(review): this chunk arrived with the file's line breaks collapsed, so
// each physical line below holds many statements. Code is kept byte-identical;
// only these comment lines were added.
//
// stateful_remove_docs(deletion_state, batch_size, done):
//   Removes up to `batch_size` documents per call, iterating the per-index id
//   arrays in deletion_state->index_ids and resuming from the saved
//   deletion_state->offsets. A remove_if_found() error aborts immediately by
//   returning that Option; successful removals increment num_removed. A
//   `goto END` breaks out of the nested loops once the batch quota is hit.
//   `done` becomes true only when every offset has reached its array length.
//   Returns the result of the last remove_if_found() call.
//
// stateful_export_docs(export_state, batch_size, done):
//   Appends up to `batch_size` documents (pruned via prune_doc_with_lock) to
//   export_state->res_body as newline-separated JSON, advancing
//   export_state->offset. `done` is set when the offset reaches
//   filter_result.count, at which point the trailing newline is trimmed with
//   pop_back(). Always returns Option<bool>(true): get_op failures are
//   skipped, not propagated. NOTE(review): remove_flat_fields /
//   remove_reference_helper_fields run on `doc` before get_op.ok() is
//   checked — presumably harmless on an empty doc, but worth confirming.
// ---------------------------------------------------------------------------
#include "core_api_utils.h" Option<bool> stateful_remove_docs(deletion_state_t* deletion_state, size_t batch_size, bool& done) { bool removed = true; size_t batch_count = 0; for(size_t i=0; i<deletion_state->index_ids.size(); i++) { std::pair<size_t, uint32_t*>& size_ids = deletion_state->index_ids[i]; size_t ids_len = size_ids.first; uint32_t* ids = size_ids.second; size_t start_index = deletion_state->offsets[i]; size_t batched_len = std::min(ids_len, (start_index+batch_size)); for(size_t j=start_index; j<batched_len; j++) { Option<bool> remove_op = deletion_state->collection->remove_if_found(ids[j], true); if(!remove_op.ok()) { return remove_op; } removed = remove_op.get(); if(removed) { deletion_state->num_removed++; } deletion_state->offsets[i]++; batch_count++; if(batch_count == batch_size) { goto END; } } } END: done = true; for(size_t i=0; i<deletion_state->index_ids.size(); i++) { size_t current_offset = deletion_state->offsets[i]; done = done && (current_offset == deletion_state->index_ids[i].first); } return Option<bool>(removed); } Option<bool> stateful_export_docs(export_state_t* export_state, size_t batch_size, bool& done) { size_t batch_count = 0; export_state->res_body->clear(); auto const& filter_result = export_state->filter_result; size_t ids_len = filter_result.count; uint32_t* ids = filter_result.docs; size_t start_index = export_state->offset; size_t batched_len = std::min(ids_len, (start_index+batch_size)); for(size_t j = start_index; j < batched_len; j++) { uint32_t seq_id = ids[j]; nlohmann::json doc; auto const& coll = export_state->collection; Option<bool> get_op = coll->get_document_from_store(seq_id, doc); Collection::remove_flat_fields(doc); Collection::remove_reference_helper_fields(doc); if(get_op.ok()) { coll->prune_doc_with_lock(doc, export_state->include_fields, export_state->exclude_fields, filter_result.coll_to_references[j], seq_id, export_state->ref_include_exclude_fields_vec); export_state->res_body->append(doc.dump()); 
// (cont.) each exported document is terminated with '\n'; the final newline is
// removed below (pop_back) once the export is complete.
export_state->res_body->append("\n"); } export_state->offset++; batch_count++; if(batch_count == batch_size) { goto END; } } END: done = export_state->offset == export_state->filter_result.count; if(done && !export_state->res_body->empty()) { export_state->res_body->pop_back(); } return Option<bool>(true); }
2,888
C++
.cpp
69
32.391304
102
0.577818
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,774
facet_index.cpp
typesense_typesense/src/facet_index.cpp
// ---------------------------------------------------------------------------
// NOTE(review): the file's line breaks were collapsed in this dump, so each
// physical line below packs many statements, and the embedded `//` comments
// swallow the remainder of their line as stored here. Code is preserved
// byte-identical; only these navigation comments were added.
//
// facet_index_t maintains, per facet field:
//   - fvalue_seq_ids: facet value (string) -> { facet_id, compressed seq_id
//     list, iterator into `counts` } (the "value index")
//   - counts: multiset of (facet_value, count, facet_id) for top-k retrieval
//   - seq_id_hashes: posting list of seq_id -> facet ids (the "hash index")
//   - fhash_to_int64_map: facet_id -> numeric value for non-string fields
// ---------------------------------------------------------------------------
#include "facet_index.h" #include <tokenizer.h> #include "string_utils.h" #include "array_utils.h" void facet_index_t::initialize(const std::string& field) { const auto facet_field_map_it = facet_field_map.find(field); if(facet_field_map_it == facet_field_map.end()) { // NOTE: try_emplace is needed to construct the value object in-place without calling the destructor facet_field_map.try_emplace(field); } } void facet_index_t::insert(const std::string& field_name, std::unordered_map<facet_value_id_t, std::vector<uint32_t>, facet_value_id_t::Hash>& fvalue_to_seq_ids, std::unordered_map<uint32_t, std::vector<facet_value_id_t>>& seq_id_to_fvalues, bool is_string_field) { const auto facet_field_map_it = facet_field_map.find(field_name); if(facet_field_map_it == facet_field_map.end()) { return; // field is not initialized or dropped } auto& facet_index = facet_field_map_it->second; auto& fvalue_index = facet_index.fvalue_seq_ids; auto fhash_index = facet_index.seq_id_hashes; for(const auto& seq_id_fvalues: seq_id_to_fvalues) { auto seq_id = seq_id_fvalues.first; std::vector<uint32_t> real_facet_ids; real_facet_ids.reserve(seq_id_fvalues.second.size()); for(const auto& fvalue: seq_id_fvalues.second) { uint32_t facet_id = fvalue.facet_id; const auto& fvalue_index_it = fvalue_index.find(fvalue.facet_value); if(fvalue.facet_id == UINT32_MAX) { // float, int32 & bool will provide facet_id as their own numerical values facet_id = (fvalue_index_it == fvalue_index.end()) ? 
// (cont.) insert(): allocate a fresh facet_id (++next_facet_id) for unseen values; numeric fields also
// record facet_id -> int64 in fhash_to_int64_map. New values get a compressed id list plus a `counts`
// entry; existing values are upserted id-by-id and their count entry updated via multiset node extract /
// re-insert (mismatched count iterators are logged as errors). Finally seq_id -> facet ids goes into the
// hash index. Also: contains(), erase(), and the start of get_stringified_value() (int32/int64 cases).
++next_facet_id : fvalue_index_it->second.facet_id; if(!is_string_field) { int64_t val = std::stoll(fvalue.facet_value); facet_index.fhash_to_int64_map[facet_id] = val; } } real_facet_ids.push_back(facet_id); auto seq_ids_it = fvalue_to_seq_ids.find(fvalue); if(seq_ids_it == fvalue_to_seq_ids.end()) { continue; } auto& seq_ids = seq_ids_it->second; if(fvalue_index_it == fvalue_index.end()) { facet_id_seq_ids_t fis; fis.facet_id = facet_id; if(facet_index.has_value_index) { fis.seq_ids = ids_t::create(seq_ids); auto new_count = ids_t::num_ids(fis.seq_ids); fis.facet_count_it = facet_index.counts.emplace(fvalue.facet_value, new_count, facet_id); } fvalue_index.emplace(fvalue.facet_value, fis); } else if(facet_index.has_value_index) { for(const auto id : seq_ids) { ids_t::upsert(fvalue_index_it->second.seq_ids, id); } auto facet_count_it = fvalue_index_it->second.facet_count_it; if(facet_count_it->facet_id == facet_id) { auto facet_count_node = facet_index.counts.extract(facet_count_it); facet_count_node.value().count = ids_t::num_ids(fvalue_index_it->second.seq_ids); facet_index.counts.insert(std::move(facet_count_node)); } else { LOG(ERROR) << "Wrong reference stored for facet " << fvalue.facet_value << " with facet_id " << facet_id; } } fvalue_to_seq_ids.erase(fvalue); } if(facet_index.has_hash_index && fhash_index != nullptr) { fhash_index->upsert(seq_id, real_facet_ids); } } } bool facet_index_t::contains(const std::string& field_name) { const auto& facet_field_it = facet_field_map.find(field_name); if(facet_field_it == facet_field_map.end()) { return false; } return true; } void facet_index_t::erase(const std::string& field_name) { facet_field_map.erase(field_name); } void facet_index_t::get_stringified_value(const nlohmann::json& value, const field& afield, std::vector<std::string>& values) { if(afield.is_int32()) { int32_t raw_val = value.get<int32_t>(); values.push_back(std::to_string(raw_val)); } else if(afield.is_int64()) { int64_t raw_val = 
// (cont.) get_stringified_value(): string values are truncated to their first 100 chars; float uses
// StringUtils::float_to_str; bool maps to "true"/"false". get_stringified_values() fans out over array
// fields. remove() begins: for each stringified value of the doc, erase seq_id from the value's id list,
// collecting values whose list becomes empty ("dead" values).
value.get<int64_t>(); values.push_back(std::to_string(raw_val)); } else if(afield.is_string()) { const std::string& raw_val = value.get<std::string>().substr(0, 100); values.push_back(raw_val); } else if(afield.is_float()) { float raw_val = value.get<float>(); values.push_back(StringUtils::float_to_str(raw_val)); } else if(afield.is_bool()) { bool raw_val = value.get<bool>(); auto str_val = (raw_val == 1) ? "true" : "false"; values.emplace_back(str_val); } } void facet_index_t::get_stringified_values(const nlohmann::json& document, const field& afield, std::vector<std::string>& values) { bool is_array = afield.is_array(); if(!is_array) { return get_stringified_value(document[afield.name], afield, values); } else { const auto& field_values = document[afield.name]; for(size_t i = 0; i < field_values.size(); i++) { get_stringified_value(field_values[i], afield, values); } } } void facet_index_t::remove(const nlohmann::json& doc, const field& afield, const uint32_t seq_id) { const auto facet_field_it = facet_field_map.find(afield.name); if(facet_field_it == facet_field_map.end()) { return ; } auto& facet_index_map = facet_field_it->second.fvalue_seq_ids; std::vector<std::string> dead_fvalues; std::vector<std::string> values; get_stringified_values(doc, afield, values); for(const auto& value: values) { auto fvalue_it = facet_index_map.find(value); if(fvalue_it == facet_index_map.end()) { continue; } void*& ids = fvalue_it->second.seq_ids; if(ids && ids_t::contains(ids, seq_id)) { ids_t::erase(ids, seq_id); auto new_count = ids_t::num_ids(ids); auto& counts = facet_field_it->second.counts; if(new_count == 0) { ids_t::destroy_list(ids); dead_fvalues.push_back(fvalue_it->first); // remove from int64 lookup map first auto& fhash_int64_map = facet_field_it->second.fhash_to_int64_map; uint32_t fhash = fvalue_it->second.facet_id; fhash_int64_map.erase(fhash); counts.erase(fvalue_it->second.facet_count_it); } else { // update count auto count_node = 
// (cont.) remove(): non-zero counts are updated via multiset node extract / re-insert; dead values are then
// erased from the value index, and the seq_id dropped from the hash index. get_facet_count() reports hash-
// index cardinality when present, else the counts size. intersect() begins: field lookup and setup.
counts.extract(fvalue_it->second.facet_count_it); count_node.value().count = ids_t::num_ids(ids); counts.insert(std::move(count_node)); } } } for(auto& dead_fvalue: dead_fvalues) { facet_index_map.erase(dead_fvalue); } auto& seq_id_hashes = facet_field_it->second.seq_id_hashes; seq_id_hashes->erase(seq_id); } size_t facet_index_t::get_facet_count(const std::string& field_name) { const auto it = facet_field_map.find(field_name); if(it == facet_field_map.end()) { return 0; } return has_hash_index(field_name) ? it->second.seq_id_hashes->num_ids() : it->second.counts.size(); } //returns the count of matching seq_ids from result array size_t facet_index_t::intersect(facet& a_facet, const field& facet_field, bool has_facet_query, bool estimate_facets, size_t facet_sample_interval, const std::vector<std::vector<std::string>>& fvalue_searched_tokens, const std::vector<char>& symbols_to_index, const std::vector<char>& token_separators, const uint32_t* result_ids, size_t result_ids_len, size_t max_facet_count, std::map<std::string, docid_count_t>& found, bool is_wildcard_no_filter_query, const std::string& sort_order) { //LOG (INFO) << "intersecting field " << field; const auto& facet_field_it = facet_field_map.find(a_facet.field_name); if(facet_field_it == facet_field_map.end()) { return 0; } const auto& facet_index_map = facet_field_it->second.fvalue_seq_ids; const auto& counter_list = facet_field_it->second.counts; //LOG(INFO) << "fvalue_seq_ids size " << facet_index_map.size() << " , counts size " << counter_list.size(); // We look 2 * max_facet_count when keyword search / filtering is involved to ensure that we // try and pick the actual top facets by count. size_t max_facets = is_wildcard_no_filter_query ? 
// (cont.) intersect(): cap at max_facet_count (doubled when a facet query / filter is involved).
// intersect_fn: when a facet query is active, tokenize the facet value and require every searched token to
// prefix-match some facet token; then count matching ids — wildcard-no-filter queries reuse the stored
// count, otherwise ids_t::intersect_count runs (with sampling when estimate_facets is set and the value has
// more than 300 ids). Matches record {first doc id, count} into `found`.
std::min((size_t)max_facet_count, counter_list.size()) : std::min((size_t)2 * max_facet_count, counter_list.size()); auto intersect_fn = [&] (std::multiset<facet_count_t>::const_iterator facet_count_it) { uint32_t count = 0; uint32_t doc_id = 0; if(has_facet_query) { bool found_search_token = false; auto facet_str = facet_count_it->facet_value; std::vector<std::string> facet_tokens; if(facet_field.is_string()) { Tokenizer(facet_str, true, false, facet_field.locale, symbols_to_index, token_separators).tokenize(facet_tokens); } else { facet_tokens.push_back(facet_str); } for(const auto& searched_tokens : fvalue_searched_tokens) { bool found_all_search_tokens = true; for (const auto &searched_token: searched_tokens) { bool facet_tokens_found = false; for(const auto& token : facet_tokens) { if (token.compare(0, searched_token.size(), searched_token) == 0) { facet_tokens_found = true; break; } } if(!facet_tokens_found) { found_all_search_tokens = false; } } if (found_all_search_tokens) { a_facet.fvalue_tokens[facet_count_it->facet_value] = searched_tokens; found_search_token = true; break; } } if(!found_search_token) { return; } } auto ids = facet_index_map.at(facet_count_it->facet_value).seq_ids; if (!ids) { return; } if (is_wildcard_no_filter_query) { count = facet_count_it->count; } else { auto val_count = ids_t::num_ids(ids); bool estimate_facet_count = (estimate_facets && val_count > 300); count = ids_t::intersect_count(ids, result_ids, result_ids_len, estimate_facet_count, facet_sample_interval); } if (count) { doc_id = ids_t::first_id(ids); found[facet_count_it->facet_value] = {doc_id, count}; } }; if(sort_order.empty()) { for (auto facet_count_it = counter_list.begin(); facet_count_it != counter_list.end(); ++facet_count_it) { //LOG(INFO) << "checking ids in facet_value " << facet_count.facet_value << " having total count " // << facet_count.count << ", is_wildcard_no_filter_query: " << is_wildcard_no_filter_query; intersect_fn(facet_count_it); if (found.size() 
// (cont.) intersect(): default iteration walks the counts multiset; "asc"/"desc" walk the value-ordered
// fvalue_seq_ids map (forward / reverse) instead, stopping at max_facets either way. Then ~facet_index_t()
// and get_facet_indexes(), the migration helper that uncompresses each value's id list to build
// seq_id -> [facet_id] for the hash index.
== max_facets) { break; } } } else { if(sort_order == "asc") { for(auto facet_index_map_it = facet_index_map.begin(); facet_index_map_it != facet_index_map.end(); ++facet_index_map_it) { intersect_fn(facet_index_map_it->second.facet_count_it); if (found.size() == max_facets) { break; } } } else if(sort_order == "desc") { for(auto facet_index_map_it = facet_index_map.rbegin(); facet_index_map_it != facet_index_map.rend(); ++facet_index_map_it) { intersect_fn(facet_index_map_it->second.facet_count_it); if (found.size() == max_facets) { break; } } } } return found.size(); } facet_index_t::~facet_index_t() { facet_field_map.clear(); } // used for migrating string and int64 facets size_t facet_index_t::get_facet_indexes(const std::string& field_name, std::map<uint32_t, std::vector<uint32_t>>& seqid_countIndexes) { const auto& facet_field_it = facet_field_map.find(field_name); if(facet_field_it == facet_field_map.end()) { return 0; } auto& facet_index_map = facet_field_it->second.fvalue_seq_ids; std::vector<uint32_t> id_list; for(auto facet_index_map_it = facet_index_map.begin(); facet_index_map_it != facet_index_map.end(); ++facet_index_map_it) { //auto ids = facet_index_map_it->seq_ids; auto ids = facet_index_map_it->second.seq_ids; if(!ids) { continue; } ids_t::uncompress(ids, id_list); // emplacing seq_id => next_facet_id for(const auto& id : id_list) { //seqid_countIndexes[id].emplace_back(facet_index_map_it->facet_id); seqid_countIndexes[id].emplace_back(facet_index_map_it->second.facet_id); } id_list.clear(); } return seqid_countIndexes.size(); } void facet_index_t::handle_index_change(const std::string& field_name, size_t total_num_docs, size_t facet_index_threshold, size_t facet_count) { // Low cardinality fields will have only value based facet index. Once a field becomes a high cardinality // field (exceeding FACET_INDEX_THRESHOLD), we will create a hash based index and populate it. 
// (cont.) handle_index_change(): lazily builds the hash index once facet_count exceeds
// facet_index_threshold (while total_num_docs < 1,000,000); id-like fields (cardinality_ratio < 5) then
// drop the value index entirely (destroying each seq_ids list and clearing counts). Also has_hash_index(),
// has_value_index(), get_facet_hash_index(), and the start of get_fhash_int64_map().
// If a field is an id-like field (cardinality_ratio < 5) we will then remove value based index. auto& facet_index = facet_field_map.at(field_name); posting_list_t*& fhash_index = facet_index.seq_id_hashes; if(fhash_index == nullptr && (facet_count > facet_index_threshold) && total_num_docs < 1000000) { fhash_index = new posting_list_t(256); std::map<uint32_t, std::vector<uint32_t>> seq_id_index_map; if(get_facet_indexes(field_name, seq_id_index_map)) { for(const auto& kv : seq_id_index_map) { fhash_index->upsert(kv.first, kv.second); } } seq_id_index_map.clear(); facet_index.has_hash_index = true; auto cardinality_ratio = total_num_docs / facet_count; if(cardinality_ratio != 0 && cardinality_ratio < 5) { // drop the value index for this field auto& fvalue_seq_ids = facet_index.fvalue_seq_ids; for(auto it = fvalue_seq_ids.begin(); it != fvalue_seq_ids.end(); ++it) { ids_t::destroy_list(it->second.seq_ids); } fvalue_seq_ids.clear(); facet_index.counts.clear(); facet_index.has_value_index = false; } } } bool facet_index_t::has_hash_index(const std::string &field_name) { auto facet_index_it = facet_field_map.find(field_name); return facet_index_it != facet_field_map.end() && facet_index_it->second.has_hash_index; } bool facet_index_t::has_value_index(const std::string &field_name) { auto facet_index_it = facet_field_map.find(field_name); return facet_index_it != facet_field_map.end() && facet_index_it->second.has_value_index; } posting_list_t* facet_index_t::get_facet_hash_index(const std::string &field_name) { auto facet_index_it = facet_field_map.find(field_name); if(facet_index_it != facet_field_map.end()) { return facet_index_it->second.seq_id_hashes; } return nullptr; } const spp::sparse_hash_map<uint32_t , int64_t >& facet_index_t::get_fhash_int64_map(const std::string& field_name) { static const spp::sparse_hash_map<uint32_t, int64_t> empty_map{}; const auto facet_field_map_it = facet_field_map.find(field_name); if(facet_field_map_it == facet_field_map.end()) { 
// (cont.) get_fhash_int64_map() returns a function-local static empty map for unknown fields.
// facet_value_exists(), facet_val_num_ids(), facet_node_count(), and the start of
// check_for_high_cardinality() — which only acts once total_num_docs >= 10k and the field still has a
// value index; a field is "sparse" when fewer than 10k docs carry it.
return empty_map; // field is not initialized or dropped } const auto& facet_index = facet_field_map_it->second; return facet_index.fhash_to_int64_map; } bool facet_index_t::facet_value_exists(const std::string& field_name, const std::string& fvalue) { const auto facet_field_map_it = facet_field_map.find(field_name); if(facet_field_map_it == facet_field_map.end()) { return false; } const auto& facet_index = facet_field_map_it->second; return facet_index.fvalue_seq_ids.find(fvalue) != facet_index.fvalue_seq_ids.end(); } size_t facet_index_t::facet_val_num_ids(const string &field_name, const string &fvalue) { const auto facet_field_map_it = facet_field_map.find(field_name); if(facet_field_map_it == facet_field_map.end()) { return 0; } if(facet_field_map_it->second.fvalue_seq_ids.count(fvalue) == 0) { return 0; } auto seq_ids = facet_field_map_it->second.fvalue_seq_ids[fvalue].seq_ids; return seq_ids ? ids_t::num_ids(seq_ids) : 0; } size_t facet_index_t::facet_node_count(const string &field_name, const string &fvalue) { const auto facet_field_map_it = facet_field_map.find(field_name); if(facet_field_map_it == facet_field_map.end()) { return 0; } if(facet_field_map_it->second.fvalue_seq_ids.count(fvalue) == 0) { return 0; } return facet_field_map_it->second.fvalue_seq_ids[fvalue].facet_count_it->count; } void facet_index_t::check_for_high_cardinality(const string& field_name, size_t total_num_docs) { // high cardinality or sparse facet fields must be dropped from value facet index if(total_num_docs < 10*1000) { return ; } const auto facet_field_map_it = facet_field_map.find(field_name); if(facet_field_map_it == facet_field_map.end()) { return ; } if(!facet_field_map_it->second.has_value_index) { return ; } auto num_facet_values = facet_field_map_it->second.fvalue_seq_ids.size(); bool is_sparse_field = false; size_t num_docs_with_facet = facet_field_map_it->second.seq_id_hashes->num_ids(); if(num_docs_with_facet > 0 && num_docs_with_facet < 10*1000) { is_sparse_field = 
// (cont.) check_for_high_cardinality(): drops the value index when unique values exceed 80% of total docs
// or the field is sparse — destroying and null-ing each seq_ids list, clearing counts, and flipping
// has_value_index off.
true; } size_t value_facet_threshold = 0.8 * total_num_docs; if(num_facet_values > value_facet_threshold || is_sparse_field) { // if there are too many unique values // or if there are too few docs for facet field, we will drop the value index auto& fvalue_seq_ids = facet_field_map_it->second.fvalue_seq_ids; for(auto it = fvalue_seq_ids.begin(); it != fvalue_seq_ids.end(); ++it) { ids_t::destroy_list(it->second.seq_ids); it->second.seq_ids = nullptr; } facet_field_map_it->second.counts.clear(); facet_field_map_it->second.has_value_index = false; //LOG(INFO) << "Dropped value index for field " << field_name; } }
19,600
C++
.cpp
415
36.881928
130
0.584582
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,775
personalization_model.cpp
typesense_typesense/src/personalization_model.cpp
#include "personalization_model.h"
#include "collection_manager.h"
#include "archive_utils.h"
#include <iostream>
#include <filesystem>

/// Resolves (and lazily creates) the on-disk directory holding a
/// personalization model's files. Layout: <model_dir>/per_<model_id>
std::string PersonalizationModel::get_model_subdir(const std::string& model_id) {
    std::string base_dir = EmbedderManager::get_model_dir();
    if (base_dir.back() != '/') {
        base_dir += '/';
    }

    std::string subdir = base_dir + "per_" + model_id;
    if (!std::filesystem::exists(subdir)) {
        std::filesystem::create_directories(subdir);
    }

    return subdir;
}

PersonalizationModel::PersonalizationModel(const std::string& model_id)
        : model_id_(model_id) {
    // Ensures the model's directory exists as a side effect.
    model_path_ = get_model_subdir(model_id_);
}

PersonalizationModel::~PersonalizationModel() = default;

/// Validates the JSON describing a model: required string fields, a
/// "ts/<model>" name with a whitelisted model per type, and an existing
/// target collection. Returns a 4xx Option on the first violation found.
Option<bool> PersonalizationModel::validate_model(const nlohmann::json& model_json) {
    if (!model_json.contains("id") || !model_json["id"].is_string()) {
        return Option<bool>(400, "Missing or invalid 'id' field.");
    }

    if (!model_json.contains("name") || !model_json["name"].is_string()) {
        return Option<bool>(400, "Missing or invalid 'name' field.");
    }

    if (!model_json.contains("collection") || !model_json["collection"].is_string()) {
        return Option<bool>(400, "Missing or invalid 'collection' field.");
    }

    // The name must be of the form "<namespace>/<model>" with exactly one '/'.
    const std::string& name = model_json["name"].get<std::string>();
    const size_t separator = name.find('/');
    if (separator == std::string::npos || name.find('/', separator + 1) != std::string::npos) {
        return Option<bool>(400, "Model name must contain exactly one '/' character.");
    }

    const std::string ns = name.substr(0, separator);
    if (ns != "ts") {
        return Option<bool>(400, "Model namespace must be 'ts'.");
    }

    const std::string model_name = name.substr(separator + 1);
    if (model_name.empty()) {
        return Option<bool>(400, "Model name part cannot be empty.");
    }

    if (!model_json.contains("type") || !model_json["type"].is_string()) {
        return Option<bool>(400, "Missing or invalid 'type' field. Must be either 'recommendation' or 'search'.");
    }

    const std::string& type = model_json["type"].get<std::string>();
    if (type != "recommendation" && type != "search") {
        return Option<bool>(400, "Invalid type. Must be either 'recommendation' or 'search'.");
    }

    // Each type only admits a fixed set of model names.
    const auto type_names = valid_model_names.find(type);
    const bool name_is_known = (type_names != valid_model_names.end()) &&
                               (std::find(type_names->second.begin(), type_names->second.end(),
                                          model_name) != type_names->second.end());
    if (!name_is_known) {
        return Option<bool>(400, "Invalid model name for type. Use 'tyrec-1' for recommendation and 'tyrec-2' for search.");
    }

    auto& collection_manager = CollectionManager::get_instance();
    const std::string& collection_name = model_json["collection"].get<std::string>();
    if (collection_manager.get_collection(collection_name) == nullptr) {
        return Option<bool>(404, "Collection '" + collection_name + "' not found.");
    }

    return Option<bool>(true);
}

/// Creates a model on disk: writes metadata.json, extracts the tar.gz payload
/// into the model directory and verifies a top-level model.onnx exists.
Option<bool> PersonalizationModel::create_model(const std::string& model_id,
                                               const nlohmann::json& model_json,
                                               const std::string model_data) {
    const std::string model_path = get_model_subdir(model_id);
    const std::string metadata_path = model_path + "/metadata.json";

    // Persist the metadata first so the model directory is self-describing.
    std::ofstream metadata_file(metadata_path);
    if (!metadata_file) {
        return Option<bool>(500, "Failed to create metadata file");
    }

    metadata_file << model_json.dump(4);
    metadata_file.close();
    if (!metadata_file) {
        return Option<bool>(500, "Failed to write metadata file");
    }

    if (!ArchiveUtils::extract_tar_gz_from_memory(model_data, model_path)) {
        return Option<bool>(500, "Failed to extract model archive");
    }

    // The archive must ship the ONNX model at its top level.
    const std::string onnx_path = model_path + "/model.onnx";
    if (!std::filesystem::exists(onnx_path)) {
        return Option<bool>(400, "Missing required model.onnx file in archive");
    }

    return Option<bool>(true);
}

/// Updates a model: rewrites metadata.json; when a non-empty archive payload
/// is supplied, validates it, wipes everything but metadata.json, and
/// extracts the new files in place.
Option<bool> PersonalizationModel::update_model(const std::string& model_id,
                                                const nlohmann::json& model_json,
                                                const std::string model_data) {
    const std::string model_path = get_model_subdir(model_id);
    const std::string metadata_path = model_path + "/metadata.json";

    std::ofstream metadata_file(metadata_path);
    if (!metadata_file) {
        return Option<bool>(500, "Failed to create metadata file");
    }

    metadata_file << model_json.dump(4);
    metadata_file.close();
    if (!metadata_file) {
        return Option<bool>(500, "Failed to write metadata file");
    }

    // An empty payload means a metadata-only update.
    if (!model_data.empty()) {
        if (!ArchiveUtils::verify_tar_gz_archive(model_data)) {
            return Option<bool>(400, "Invalid model archive format");
        }

        // Remove everything except the metadata we just wrote, then unpack.
        std::filesystem::path model_dir(model_path);
        for (const auto& entry : std::filesystem::directory_iterator(model_dir)) {
            if (entry.path().filename() != "metadata.json") {
                std::filesystem::remove_all(entry.path());
            }
        }

        if (!ArchiveUtils::extract_tar_gz_from_memory(model_data, model_path)) {
            return Option<bool>(500, "Failed to extract model archive");
        }
    }

    return Option<bool>(true);
}

/// Deletes a model's directory tree. 404 when absent; filesystem failures
/// surface as a 500 with the underlying error message.
Option<bool> PersonalizationModel::delete_model(const std::string& model_id) {
    const std::string model_path = get_model_subdir(model_id);
    if (!std::filesystem::exists(model_path)) {
        return Option<bool>(404, "Model directory not found");
    }

    try {
        std::filesystem::remove_all(model_path);
        return Option<bool>(true);
    } catch (const std::filesystem::filesystem_error& e) {
        return Option<bool>(500, "Failed to delete model directory: " + std::string(e.what()));
    }
}
5,797
C++
.cpp
125
40.184
142
0.648254
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,776
id_list.cpp
typesense_typesense/src/id_list.cpp
// ---------------------------------------------------------------------------
// NOTE(review): the file's line breaks were collapsed in this dump, so each
// physical line below packs many statements, and the embedded `//` comments
// swallow the remainder of their line as stored here. Code is preserved
// byte-identical; only these navigation comments were added.
//
// id_list_t is a sorted, block-based compressed id list. Each block_t holds a
// compressed id array; id_block_map maps a block's last id -> block for
// binary-search navigation. iterator_t walks blocks forward (or backward via
// id_block_map) and caches an uncompressed copy of the current block in `ids`.
// ---------------------------------------------------------------------------
#include "id_list.h" #include <algorithm> #include "for.h" /* block_t operations */ bool id_list_t::block_t::contains(uint32_t id) { return ids.contains(id); } uint32_t id_list_t::block_t::upsert(const uint32_t id) { if (ids.contains(id)) { return 0; } ids.append(id); return 1; } uint32_t id_list_t::block_t::erase(const uint32_t id) { uint32_t doc_index = ids.indexOf(id); if (doc_index == ids.getLength()) { return 0; } ids.remove_value(id); return 1; } /* iterator_t operations */ id_list_t::iterator_t::iterator_t(id_list_t::block_t* start, id_list_t::block_t* end, std::map<last_id_t, block_t*>* id_block_map, bool reverse): curr_block(start), curr_index(0), end_block(end), id_block_map(id_block_map), reverse(reverse) { if(curr_block != end_block) { ids = curr_block->ids.uncompress(); if(reverse) { curr_index = curr_block->ids.getLength()-1; } } } bool id_list_t::iterator_t::valid() const { if(reverse) { return (curr_block != end_block) && (curr_index >= 0); } else { return (curr_block != end_block) && (curr_index < curr_block->size()); } } void id_list_t::iterator_t::next() { curr_index++; if(curr_index == curr_block->size()) { curr_index = 0; curr_block = curr_block->next; delete [] ids; ids = nullptr; if(curr_block != end_block) { ids = curr_block->ids.uncompress(); } } } void id_list_t::iterator_t::previous() { curr_index--; if(curr_index < 0) { // since block stores only the next pointer, we have to use `id_block_map` for reverse iteration auto last_ele = ids[curr_block->size()-1]; auto it = id_block_map->find(last_ele); if(it != id_block_map->end() && it != id_block_map->begin()) { it--; curr_block = it->second; curr_index = curr_block->size()-1; delete [] ids; ids = curr_block->ids.uncompress(); } else { curr_block = end_block; } } } uint32_t id_list_t::iterator_t::id() const { return ids[curr_index]; } uint32_t id_list_t::iterator_t::index() const { return curr_index; } id_list_t::block_t* id_list_t::iterator_t::block() const { return curr_block; } uint32_t 
// (cont.) last_block_id(), reset_cache() (frees the uncompressed cache and invalidates the iterator),
// skip_n() (advance n positions across blocks), skip_to() (block-local scan when the target is within the
// current block, otherwise an id_block_map.lower_bound jump), the iterator destructor, move-assignment and
// move-construction, and the id_list_t constructor (its max_block_elements <= 1 guard follows on the next
// physical line).
id_list_t::iterator_t::last_block_id() const { auto size = curr_block->size(); if(size == 0) { return 0; } return ids[size - 1]; } void id_list_t::iterator_t::reset_cache() { delete [] ids; ids = nullptr; curr_index = 0; curr_block = end_block = nullptr; } void id_list_t::iterator_t::skip_n(uint32_t n) { while(curr_block != end_block) { curr_index += n; if(curr_index < curr_block->size()) { return; } n = (curr_index - curr_block->size() + 1); curr_block = curr_block->next; delete [] ids; ids = nullptr; if(curr_block != end_block) { curr_index = 0; n--; ids = curr_block->ids.uncompress(); } else { reset_cache(); } } } void id_list_t::iterator_t::skip_to(uint32_t id) { // first look to skip within current block if(id <= this->last_block_id()) { while(curr_index < curr_block->size() && this->id() < id) { curr_index++; } return ; } reset_cache(); const auto it = id_block_map->lower_bound(id); if(it == id_block_map->end()) { return; } curr_block = it->second; curr_index = 0; ids = curr_block->ids.uncompress(); while(curr_index < curr_block->size() && this->id() < id) { curr_index++; } if(curr_index == curr_block->size()) { reset_cache(); } } id_list_t::iterator_t::~iterator_t() { delete [] ids; ids = nullptr; } id_list_t::iterator_t& id_list_t::iterator_t::operator=(id_list_t::iterator_t&& obj) noexcept { if (&obj == this) { return *this; } delete [] ids; ids = obj.ids; obj.ids = nullptr; curr_block = obj.curr_block; curr_index = obj.curr_index; end_block = obj.end_block; id_block_map = obj.id_block_map; reverse = obj.reverse; return *this; } id_list_t::iterator_t::iterator_t(iterator_t&& rhs) noexcept { curr_block = rhs.curr_block; curr_index = rhs.curr_index; end_block = rhs.end_block; ids = rhs.ids; id_block_map = rhs.id_block_map; reverse = rhs.reverse; rhs.curr_block = nullptr; rhs.end_block = nullptr; rhs.ids = nullptr; rhs.id_block_map = nullptr; } /* id_list_t operations */ id_list_t::id_list_t(uint16_t max_block_elements): BLOCK_MAX_ELEMENTS(max_block_elements) 
// (cont.) constructor guard; ~id_list_t() frees the chained blocks after root_block;
// merge_adjacent_blocks() moves the first num_block2_ids_to_move ids of block2 onto the end of block1;
// split_block() halves a block's ids between src and dst; upsert() begins by locating the candidate block
// via id_block_map.lower_bound (falling back to the last block).
{ if(max_block_elements <= 1) { throw std::invalid_argument("max_block_elements must be > 1"); } } id_list_t::~id_list_t() { block_t* block = root_block.next; while(block != nullptr) { block_t* next_block = block->next; delete block; block = next_block; } } void id_list_t::merge_adjacent_blocks(id_list_t::block_t* block1, id_list_t::block_t* block2, size_t num_block2_ids_to_move) { // merge ids uint32_t* ids1 = block1->ids.uncompress(); uint32_t* ids2 = block2->ids.uncompress(); uint32_t* new_ids = new uint32_t[block1->size() + num_block2_ids_to_move]; std::memmove(new_ids, ids1, sizeof(uint32_t) * block1->size()); std::memmove(new_ids + block1->size(), ids2, sizeof(uint32_t) * num_block2_ids_to_move); block1->ids.load(new_ids, block1->size() + num_block2_ids_to_move); if(block2->size() != num_block2_ids_to_move) { block2->ids.load(ids2 + num_block2_ids_to_move, block2->size() - num_block2_ids_to_move); } else { block2->ids.load(nullptr, 0); } delete [] ids1; delete [] ids2; delete [] new_ids; } void id_list_t::split_block(id_list_t::block_t* src_block, id_list_t::block_t* dst_block) { if(src_block->size() <= 1) { return; } uint32_t* raw_ids = src_block->ids.uncompress(); size_t ids_first_half_length = (src_block->size() / 2); size_t ids_second_half_length = (src_block->size() - ids_first_half_length); src_block->ids.load(raw_ids, ids_first_half_length); dst_block->ids.load(raw_ids + ids_first_half_length, ids_second_half_length); delete [] raw_ids; } void id_list_t::upsert(const uint32_t id) { // first we will locate the block where `id` should reside block_t* upsert_block; last_id_t before_upsert_last_id; if(id_block_map.empty()) { upsert_block = &root_block; before_upsert_last_id = UINT32_MAX; } else { const auto it = id_block_map.lower_bound(id); upsert_block = (it == id_block_map.end()) ? 
// (cont.) upsert(): the happy path inserts into a non-full block and re-keys id_block_map when the block's
// last id changed; a full block either gains a fresh tail block (append-at-end case) or is upserted into
// and then split evenly. erase() begins: remove the id, then handle the block-became-empty cases — unlink
// and delete a non-root block, or refill the root block from its successor.
id_block_map.rbegin()->second : it->second; before_upsert_last_id = upsert_block->ids.last(); } // happy path: upsert_block is not full if(upsert_block->size() < BLOCK_MAX_ELEMENTS) { uint32_t num_inserted = upsert_block->upsert(id); ids_length += num_inserted; last_id_t after_upsert_last_id = upsert_block->ids.last(); if(before_upsert_last_id != after_upsert_last_id) { id_block_map.erase(before_upsert_last_id); id_block_map.emplace(after_upsert_last_id, upsert_block); } } else { block_t* new_block = new block_t; if(upsert_block->next == nullptr && upsert_block->ids.last() < id) { // appending to the end of the last block where the id will reside on a newly block uint32_t num_inserted = new_block->upsert(id); ids_length += num_inserted; } else { // upsert and then split block uint32_t num_inserted = upsert_block->upsert(id); ids_length += num_inserted; // evenly divide elements between both blocks split_block(upsert_block, new_block); last_id_t after_upsert_last_id = upsert_block->ids.last(); id_block_map.erase(before_upsert_last_id); id_block_map.emplace(after_upsert_last_id, upsert_block); } last_id_t after_new_block_id = new_block->ids.last(); id_block_map.emplace(after_new_block_id, new_block); new_block->next = upsert_block->next; upsert_block->next = new_block; } } void id_list_t::erase(const uint32_t id) { const auto it = id_block_map.lower_bound(id); if(it == id_block_map.end()) { return ; } block_t* erase_block = it->second; last_id_t before_last_id = it->first; uint32_t num_erased = erase_block->erase(id); ids_length -= num_erased; size_t new_ids_length = erase_block->size(); if(new_ids_length == 0) { // happens when the last element of last block is deleted if(erase_block != &root_block) { // since we will be deleting the empty node, set the previous node's next pointer to null std::prev(it)->second->next = nullptr; delete erase_block; } else { // The root block cannot be empty if there are other blocks so we will pull some contents from next block // 
// (cont.) erase(): root-block refill pulls half the successor's ids forward and re-keys both blocks in
// id_block_map (the split comment notes this matters for max block size 2). Blocks still at >= 50%
// capacity — or with no successor — just re-key on last-id change; otherwise the block either fully merges
// with its successor (which is then deleted) or borrows part of it.
This is only an issue for blocks with max size of 2 if(root_block.next != nullptr) { auto next_block_last_id = erase_block->next->ids.last(); merge_adjacent_blocks(erase_block, erase_block->next, erase_block->next->size()/2); id_block_map.erase(next_block_last_id); id_block_map.emplace(erase_block->next->ids.last(), erase_block->next); id_block_map.emplace(erase_block->ids.last(), erase_block); } } id_block_map.erase(before_last_id); return; } if(new_ids_length >= BLOCK_MAX_ELEMENTS/2 || erase_block->next == nullptr) { last_id_t after_last_id = erase_block->ids.last(); if(before_last_id != after_last_id) { id_block_map.erase(before_last_id); id_block_map.emplace(after_last_id, erase_block); } return ; } // block is less than 50% of max capacity and contains a next node which we can refill from auto next_block = erase_block->next; last_id_t next_block_last_id = next_block->ids.last(); if(erase_block->size() + next_block->size() <= BLOCK_MAX_ELEMENTS) { // we can merge the contents of next block with `erase_block` and delete the next block merge_adjacent_blocks(erase_block, next_block, next_block->size()); erase_block->next = next_block->next; delete next_block; id_block_map.erase(next_block_last_id); } else { // Only part of the next block can be moved over. 
// (cont.) erase(): a partial borrow moves exactly BLOCK_MAX_ELEMENTS/2 ids so adjacent blocks don't
// "flip". Accessors: get_root(), num_blocks(), first_id(), last_id(), block_of(). merge() begins: k-way
// sorted union over iterators, with a specialized two-list fast path.
// We will move only 50% of max elements to ensure that we don't end up "flipping" adjacent blocks: // 1, 5 -> 5, 1 size_t num_block2_ids = BLOCK_MAX_ELEMENTS/2; merge_adjacent_blocks(erase_block, next_block, num_block2_ids); // NOTE: we don't have to update `id_block_map` for `next_block` as last element doesn't change } last_id_t after_last_id = erase_block->ids.last(); if(before_last_id != after_last_id) { id_block_map.erase(before_last_id); id_block_map.emplace(after_last_id, erase_block); } } id_list_t::block_t* id_list_t::get_root() { return &root_block; } size_t id_list_t::num_blocks() const { return id_block_map.size(); } uint32_t id_list_t::first_id() { if(ids_length == 0) { return 0; } return root_block.ids.at(0); } uint32_t id_list_t::last_id() { if(id_block_map.empty()) { return 0; } return id_block_map.rbegin()->first; } id_list_t::block_t* id_list_t::block_of(uint32_t id) { const auto it = id_block_map.lower_bound(id); if(it == id_block_map.end()) { return nullptr; } return it->second; } void id_list_t::merge(const std::vector<id_list_t*>& id_lists, std::vector<uint32_t>& result_ids) { auto its = std::vector<id_list_t::iterator_t>(); its.reserve(id_lists.size()); size_t sum_sizes = 0; for(const auto& id_list: id_lists) { its.push_back(id_list->new_iterator()); sum_sizes += id_list->num_ids(); } result_ids.reserve(sum_sizes); size_t num_lists = its.size(); switch (num_lists) { case 2: while(!at_end2(its)) { if(equals2(its)) { //LOG(INFO) << its[0].id(); result_ids.push_back(its[0].id()); advance_all2(its); } else { uint32_t smallest_value = advance_smallest2(its); result_ids.push_back(smallest_value); } } while(its[0].valid()) { result_ids.push_back(its[0].id()); its[0].next(); } while(its[1].valid()) { result_ids.push_back(its[1].id()); its[1].next(); } break; default: while(!at_end(its)) { if(equals(its)) { result_ids.push_back(its[0].id()); advance_all(its); } else { uint32_t smallest_value = advance_smallest(its); 
// (cont.) merge() default path drains the remaining iterators after the k-way loop; intersect() mirrors
// merge() but advances the non-largest iterators instead of emitting smallest values (single-list case is
// a straight copy). Helpers: at_end/at_end2 (any iterator exhausted), equals/equals2 (all heads equal).
// NOTE(review): the final definition, new_iterator(), is truncated mid-expression at the end of this chunk
// — it must not be modified here without the rest of the file in view.
result_ids.push_back(smallest_value); } } for(auto& it: its) { while(it.valid()) { result_ids.push_back(it.id()); it.next(); } } } } // Inspired by: https://stackoverflow.com/a/25509185/131050 void id_list_t::intersect(const std::vector<id_list_t*>& id_lists, std::vector<uint32_t>& result_ids) { if(id_lists.empty()) { return; } if(id_lists.size() == 1) { result_ids.reserve(id_lists[0]->ids_length); auto it = id_lists[0]->new_iterator(); while(it.valid()) { result_ids.push_back(it.id()); it.next(); } return ; } auto its = std::vector<id_list_t::iterator_t>(); its.reserve(id_lists.size()); for(const auto& id_list: id_lists) { its.push_back(id_list->new_iterator()); } size_t num_lists = its.size(); switch (num_lists) { case 2: while(!at_end2(its)) { if(equals2(its)) { //LOG(INFO) << its[0].id(); result_ids.push_back(its[0].id()); advance_all2(its); } else { advance_non_largest2(its); } } break; default: while(!at_end(its)) { if(equals(its)) { //LOG(INFO) << its[0].id(); result_ids.push_back(its[0].id()); advance_all(its); } else { advance_non_largest(its); } } } } bool id_list_t::at_end(const std::vector<id_list_t::iterator_t>& its) { // if any one iterator is at end, we can stop for(const auto& it : its) { if(!it.valid()) { return true; } } return false; } bool id_list_t::at_end2(const std::vector<id_list_t::iterator_t>& its) { // if any one iterator is at end, we can stop return !its[0].valid() || !its[1].valid(); } bool id_list_t::equals(std::vector<id_list_t::iterator_t>& its) { for(size_t i = 0; i < its.size() - 1; i++) { if(its[i].id() != its[i+1].id()) { return false; } } return true; } bool id_list_t::equals2(std::vector<id_list_t::iterator_t>& its) { return its[0].id() == its[1].id(); } id_list_t::iterator_t id_list_t::new_iterator(block_t* start_block, block_t* end_block) { start_block = (start_block == nullptr) ? 
&root_block : start_block; return id_list_t::iterator_t(start_block, end_block, &id_block_map, false); } id_list_t::iterator_t id_list_t::new_rev_iterator() { block_t* start_block = nullptr; if(!id_block_map.empty()) { start_block = id_block_map.rbegin()->second; } auto rev_it = id_list_t::iterator_t(start_block, nullptr, &id_block_map, true); return rev_it; } void id_list_t::advance_all(std::vector<id_list_t::iterator_t>& its) { for(auto& it: its) { it.next(); } } void id_list_t::advance_all2(std::vector<id_list_t::iterator_t>& its) { its[0].next(); its[1].next(); } void id_list_t::advance_non_largest(std::vector<id_list_t::iterator_t>& its) { // we will find the iter with greatest value and then advance the rest until their value catches up uint32_t greatest_value = 0; for(size_t i = 0; i < its.size(); i++) { if(its[i].id() > greatest_value) { greatest_value = its[i].id(); } } for(size_t i = 0; i < its.size(); i++) { if(its[i].id() != greatest_value) { its[i].skip_to(greatest_value); } } } void id_list_t::advance_non_largest2(std::vector<id_list_t::iterator_t>& its) { if(its[0].id() > its[1].id()) { its[1].skip_to(its[0].id()); } else { its[0].skip_to(its[1].id()); } } uint32_t id_list_t::advance_smallest(std::vector<id_list_t::iterator_t>& its) { // we will advance the iterator(s) with the smallest value and then return that value uint32_t smallest_value = UINT32_MAX; for(size_t i = 0; i < its.size(); i++) { if(its[i].id() < smallest_value) { smallest_value = its[i].id(); } } for(size_t i = 0; i < its.size(); i++) { if(its[i].id() == smallest_value) { its[i].next(); } } return smallest_value; } uint32_t id_list_t::advance_smallest2(std::vector<id_list_t::iterator_t>& its) { uint32_t smallest_value = 0; if(its[0].id() < its[1].id()) { smallest_value = its[0].id(); its[0].next(); } else { smallest_value = its[1].id(); its[1].next(); } return smallest_value; } size_t id_list_t::num_ids() const { return ids_length; } bool id_list_t::contains(uint32_t id) { const 
auto it = id_block_map.lower_bound(id); if(it == id_block_map.end()) { return false; } block_t* potential_block = it->second; return potential_block->contains(id); } bool id_list_t::contains_atleast_one(const uint32_t* target_ids, size_t target_ids_size) { id_list_t::iterator_t it = new_iterator(); size_t target_ids_index = 0; while(target_ids_index < target_ids_size && it.valid()) { uint32_t id = it.id(); if(id == target_ids[target_ids_index]) { return true; } else { // advance smallest value if(id > target_ids[target_ids_index]) { while(target_ids_index < target_ids_size && target_ids[target_ids_index] < id) { target_ids_index++; } } else { it.skip_to(target_ids[target_ids_index]); } } } return false; } bool id_list_t::take_id(result_iter_state_t& istate, uint32_t id) { // decide if this result id should be excluded if(istate.excluded_result_ids_size != 0) { if (std::binary_search(istate.excluded_result_ids, istate.excluded_result_ids + istate.excluded_result_ids_size, id)) { return false; } } // decide if this result be matched with filter results if(istate.filter_ids_length != 0) { return std::binary_search(istate.filter_ids, istate.filter_ids + istate.filter_ids_length, id); } return true; } void id_list_t::uncompress(std::vector<uint32_t>& data) { auto it = new_iterator(); data.reserve(data.size() + ids_length); while(it.valid()) { data.push_back(it.id()); it.next(); } } uint32_t* id_list_t::uncompress() { uint32_t* arr = new uint32_t[ids_length]; auto it = new_iterator(); size_t i = 0; while(it.valid()) { arr[i++] = it.id(); it.next(); } return arr; } size_t id_list_t::intersect_count(const uint32_t *res_ids, size_t res_ids_len, bool estimate_facets, size_t facet_sample_interval) { size_t count = 0; size_t res_index = 0; auto it = new_iterator(); if(estimate_facets) { while(it.valid() && res_index < res_ids_len) { if(it.id() == res_ids[res_index]) { count++; it.skip_n(facet_sample_interval); res_index += facet_sample_interval; } else if(it.id() < 
res_ids[res_index]) { it.skip_n(facet_sample_interval); } else { res_index += facet_sample_interval; } } } else { while(it.valid() && res_index < res_ids_len) { if(it.id() == res_ids[res_index]) { count++; it.next(); res_index += 1; } else if(it.id() < res_ids[res_index]) { it.next(); } else { res_index += 1; } } } //LOG(INFO) << "estimate_facets: " << estimate_facets << ", res_ids_len: " << res_ids_len // << ", skip_interval: " << facet_sample_interval << ", count: " << count; if(estimate_facets) { count = count * facet_sample_interval * facet_sample_interval; } return std::min<size_t>(ids_length, count); }
21,760
C++
.cpp
620
27.266129
117
0.558847
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,777
conversation_model_manager.cpp
typesense_typesense/src/conversation_model_manager.cpp
#include "conversation_model_manager.h" #include "conversation_model.h" #include "conversation_manager.h" Option<nlohmann::json> ConversationModelManager::get_model(const std::string& model_id) { std::shared_lock lock(models_mutex); auto it = models.find(model_id); if (it == models.end()) { return Option<nlohmann::json>(404, "Model not found"); } return Option<nlohmann::json>(it->second); } Option<bool> ConversationModelManager::add_model(nlohmann::json& model, const std::string& model_id, const bool write_to_disk) { std::unique_lock lock(models_mutex); if (models.find(model_id) != models.end()) { return Option<bool>(409, "Model already exists"); } model["id"] = model_id.empty() ? sole::uuid4().str() : model_id; if(model.count("ttl") == 0) { model["ttl"] = (uint64_t)(60 * 60 * 24); } auto validate_res = ConversationModel::validate_model(model); if (!validate_res.ok()) { return Option<bool>(validate_res.code(), validate_res.error()); } models[model["id"]] = model; if(write_to_disk) { auto model_key = get_model_key(model["id"]); bool insert_op = store->insert(model_key, model.dump(0)); if(!insert_op) { return Option<bool>(500, "Error while inserting model into the store"); } } return Option<bool>(true); } Option<nlohmann::json> ConversationModelManager::delete_model(const std::string& model_id) { std::unique_lock lock(models_mutex); return delete_model_unsafe(model_id); } Option<nlohmann::json> ConversationModelManager::delete_model_unsafe(const std::string& model_id) { auto it = models.find(model_id); if (it == models.end()) { return Option<nlohmann::json>(404, "Model not found"); } nlohmann::json model = it->second; auto model_key = get_model_key(model_id); bool delete_op = store->remove(model_key); if(!delete_op) { return Option<nlohmann::json>(500, "Error while deleting model from the store"); } models.erase(it); return Option<nlohmann::json>(model); } Option<nlohmann::json> ConversationModelManager::get_all_models() { std::shared_lock lock(models_mutex); 
nlohmann::json models_json = nlohmann::json::array(); for (auto& [id, model] : models) { models_json.push_back(model); } return Option<nlohmann::json>(models_json); } Option<nlohmann::json> ConversationModelManager::update_model(const std::string& model_id, nlohmann::json model) { std::unique_lock lock(models_mutex); auto it = models.find(model_id); if (it == models.end()) { return Option<nlohmann::json>(404, "Model not found"); } nlohmann::json model_copy = it->second; for (auto& [key, value] : model.items()) { model_copy[key] = value; } auto validate_res = ConversationModel::validate_model(model_copy); if (!validate_res.ok()) { return Option<nlohmann::json>(validate_res.code(), validate_res.error()); } auto model_key = get_model_key(model_id); bool insert_op = store->insert(model_key, model_copy.dump(0)); if(!insert_op) { return Option<nlohmann::json>(500, "Error while inserting model into the store"); } models[model_id] = model_copy; return Option<nlohmann::json>(model_copy); } Option<int> ConversationModelManager::init(Store* store) { ConversationModelManager::store = store; std::vector<std::string> model_strs; store->scan_fill(std::string(MODEL_KEY_PREFIX) + "_", std::string(MODEL_KEY_PREFIX) + "`", model_strs); if(!model_strs.empty()) { LOG(INFO) << "Found " << model_strs.size() << " conversation model(s)."; } int loaded_models = 0; for(auto& model_str : model_strs) { nlohmann::json model_json = nlohmann::json::parse(model_str); const std::string& model_id = model_json["id"]; // handle model format changes auto has_migration = migrate_model(model_json); // write to disk only when a migration has been done on model data auto add_op = add_model(model_json, model_id, has_migration); if(!add_op.ok()) { LOG(ERROR) << "Error while loading conversation model: " << model_id << ", error: " << add_op.error(); continue; } loaded_models++; } return Option<int>(loaded_models); } const std::string ConversationModelManager::get_model_key(const std::string& model_id) { 
return std::string(MODEL_KEY_PREFIX) + "_" + model_id; } Option<Collection*> ConversationModelManager::create_default_history_collection(const std::string& model_id) { std::string collection_name = "ts_conversation_history_" + model_id; auto get_res = CollectionManager::get_instance().get_collection(collection_name).get(); if(get_res) { return Option<Collection*>(get_res); } nlohmann::json schema_json = R"({ "fields": [ { "name": "conversation_id", "type": "string" }, { "name": "role", "type": "string", "index": false }, { "name": "message", "type": "string", "index": false }, { "name": "timestamp", "type": "int32", "sort": true }, { "name": "model_id", "type": "string" } ] })"_json; schema_json["name"] = collection_name; auto create_res = CollectionManager::get_instance().create_collection(schema_json); if(!create_res.ok()) { return Option<Collection*>(create_res.code(), create_res.error()); } return Option<Collection*>(create_res.get()); } bool ConversationModelManager::migrate_model(nlohmann::json& model) { // handles missing fields and format changes auto model_id = model["id"]; bool has_model_change = false; // Migrate cloudflare models to new namespace convention, change namespace from `cf` to `cloudflare` if(EmbedderManager::get_model_namespace(model["model_name"]) == "cf") { model["model_name"] = "cloudflare/@cf/" + EmbedderManager::get_model_name_without_namespace(model["model_name"]); has_model_change = true; } if(model.count("history_collection") == 0) { auto default_collection_op = create_default_history_collection(model_id); if(!default_collection_op.ok()) { LOG(INFO) << "Error while creating default history collection for model " << model_id << ": " << default_collection_op.error(); return false; } model["history_collection"] = default_collection_op.get()->get_name(); has_model_change = true; } if(model.count("ttl") == 0) { model["ttl"] = (uint64_t)(60 * 60 * 24); has_model_change = true; } return has_model_change; } std::unordered_set<std::string> 
ConversationModelManager::get_history_collections() { std::unordered_set<std::string> collections; for(auto& [id, model] : models) { if(model.find("history_collection") == model.end()) { continue; } collections.insert(model["history_collection"].get<std::string>()); } return collections; }
7,470
C++
.cpp
184
33.097826
121
0.613238
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,778
image_processor.cpp
typesense_typesense/src/image_processor.cpp
#include "image_processor.h" #include "logger.h" CLIPImageProcessor::CLIPImageProcessor(const std::string& model_path) { Ort::SessionOptions session_options; session_options.EnableOrtCustomOps(); auto processor_path = model_path + "/clip_image_processor.onnx"; LOG(INFO) << "Loading image processor from " << processor_path; session_ = std::make_unique<Ort::Session>(env_, processor_path.c_str(), session_options); } Option<processed_image_t> CLIPImageProcessor::process_image(const std::string& image_encoded) { std::unique_lock<std::mutex> lock(mutex_); // Decode image auto image = StringUtils::base64_decode(image_encoded); // convert string to byte array std::vector<uint8_t> image_bytes(image.begin(), image.end()); // Create input tensor int64_t input_tensor_size = image_bytes.size(); std::vector<int64_t> input_shape = {input_tensor_size}; std::vector<const char*> input_names = {"image"}; auto memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault); auto input_tensor = Ort::Value::CreateTensor<uint8_t>(memory_info, image_bytes.data(), image_bytes.size(), input_shape.data(), input_shape.size()); // Create output tensor std::vector<const char*> output_names = {"last_hidden_state"}; // Run inference std::vector<Ort::Value> output_tensors; // LOG(INFO) << "Running image processor"; try { output_tensors = session_->Run(Ort::RunOptions{nullptr}, input_names.data(), &input_tensor, 1, output_names.data(), output_names.size()); } catch (...) 
{ return Option<processed_image_t>(400, "Error while processing image"); } // Get output tensor auto output_tensor = output_tensors.front().GetTensorMutableData<float>(); // Convert output tensor to processed_image_t auto output_shape = output_tensors.front().GetTensorTypeAndShapeInfo().GetShape(); if (output_shape.size() != 4) { LOG(INFO) << "Output tensor shape is not 4D"; return Option<processed_image_t>(400, "Error while processing image"); } processed_image_t output; for (size_t i = 0; i < output_shape[0]; i++) { for (size_t j = 0; j < output_shape[1]; j++) { for (size_t k = 0; k < output_shape[2]; k++) { for (size_t l = 0; l < output_shape[3]; l++) { output.push_back(output_tensor[i * output_shape[1] * output_shape[2] * output_shape[3] + j * output_shape[2] * output_shape[3] + k * output_shape[3] + l]); } } } } // LOG(INFO) << "Image processed"; return Option<processed_image_t>(std::move(output)); }
2,678
C++
.cpp
52
45.076923
175
0.65184
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,779
japanese_localizer.cpp
typesense_typesense/src/japanese_localizer.cpp
#include "japanese_localizer.h" #include "japanese_data.h" #include "string_utils.h" #include "logger.h" #include <cstdlib> #include <fstream> extern "C" { #include "libkakasi.h" } void JapaneseLocalizer::write_data_file(const std::string &base64_data, const std::string &file_name) { const std::string& binary_str = StringUtils::base64_decode(base64_data); std::ofstream out(file_name, std::ios::out | std::ios::binary); out << binary_str; out.flush(); out.close(); } bool JapaneseLocalizer::init() { const std::string kanwa_data_file_path = "/tmp/kanwa.data"; const std::string itaji_data_file_path = "/tmp/itaji.data"; write_data_file(JA_DATA::kanwa_dict, kanwa_data_file_path); write_data_file(JA_DATA::itaji_dict, itaji_data_file_path); setenv("KANWADICTPATH", kanwa_data_file_path.c_str(), true); setenv("ITAIJIDICTPATH", itaji_data_file_path.c_str(), true); // initialize kakasi datastructures std::vector<std::string> arguments = {"./kakasi", "-JH", "-KH", "-s", "-iutf8", "-outf8"}; std::vector<char*> argv; for (const auto& arg : arguments) { argv.push_back((char*)arg.data()); } if (kakasi_getopt_argv(argv.size(), argv.data()) != 0) { LOG(ERROR) << "Kakasi initialization failed."; return false; } return true; } char* JapaneseLocalizer::normalize(const std::string& text) { std::unique_lock lk(m); return kakasi_do((char *)text.c_str()); } JapaneseLocalizer::JapaneseLocalizer() { init(); }
1,530
C++
.cpp
42
32.428571
103
0.667794
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,780
conversation_model.cpp
typesense_typesense/src/conversation_model.cpp
#include <regex> #include <iterator> #include "conversation_model.h" #include "embedder_manager.h" #include "text_embedder_remote.h" #include "conversation_manager.h" const std::string get_model_namespace(const std::string& model_name) { if(model_name.find("/") != std::string::npos) { return model_name.substr(0, model_name.find("/")); } else { return ""; } } Option<bool> ConversationModel::validate_model(const nlohmann::json& model_config) { // check model_name exists and it is a string if(model_config.count("model_name") == 0 || !model_config["model_name"].is_string()) { return Option<bool>(400, "Property `model_name` is not provided or not a string."); } if(model_config.count("system_prompt") != 0 && !model_config["system_prompt"].is_string()) { return Option<bool>(400, "Property `system_prompt` is not a string."); } if(model_config.count("history_collection") == 0 || !model_config["history_collection"].is_string()) { return Option<bool>(400, "Property `history_collection` is missing or is not a string."); } if(model_config.count("max_bytes") == 0 || !model_config["max_bytes"].is_number_unsigned() || model_config["max_bytes"].get<size_t>() == 0) { return Option<bool>(400, "Property `max_bytes` is not provided or not a positive integer."); } auto validate_converson_collection_op = ConversationManager::get_instance() .validate_conversation_store_collection(model_config["history_collection"].get<std::string>()); if(!validate_converson_collection_op.ok()) { return Option<bool>(400, validate_converson_collection_op.error()); } if(model_config.count("ttl") != 0 && !model_config["ttl"].is_number_unsigned()) { return Option<bool>(400, "Property `ttl` is not a positive integer."); } const std::string model_namespace = get_model_namespace(model_config["model_name"].get<std::string>()); if(model_namespace == "openai") { return OpenAIConversationModel::validate_model(model_config); } else if(model_namespace == "cloudflare") { return 
CFConversationModel::validate_model(model_config); } else if(model_namespace == "vllm") { return vLLMConversationModel::validate_model(model_config); } return Option<bool>(400, "Model namespace `" + model_namespace + "` is not supported."); } Option<std::string> ConversationModel::get_answer(const std::string& context, const std::string& prompt, const nlohmann::json& model_config) { const std::string& model_namespace = get_model_namespace(model_config["model_name"].get<std::string>()); std::string system_prompt = ""; if(model_config.count("system_prompt") != 0 && model_config["system_prompt"].is_string()) { system_prompt = model_config["system_prompt"].get<std::string>(); } if(model_namespace == "openai") { return OpenAIConversationModel::get_answer(context, prompt, system_prompt, model_config); } else if(model_namespace == "cloudflare") { return CFConversationModel::get_answer(context, prompt, system_prompt, model_config); } else if(model_namespace == "vllm") { return vLLMConversationModel::get_answer(context, prompt, system_prompt, model_config); } return Option<std::string>(400, "Model namespace " + model_namespace + " is not supported."); } Option<std::string> ConversationModel::get_standalone_question(const nlohmann::json& conversation_history, const std::string& question, const nlohmann::json& model_config) { const std::string model_namespace = get_model_namespace(model_config["model_name"].get<std::string>()); if(model_namespace == "openai") { return OpenAIConversationModel::get_standalone_question(conversation_history, question, model_config); } else if(model_namespace == "cloudflare") { return CFConversationModel::get_standalone_question(conversation_history, question, model_config); } else if(model_namespace == "vllm") { return vLLMConversationModel::get_standalone_question(conversation_history, question, model_config); } return Option<std::string>(400, "Model namespace " + model_namespace + " is not supported."); } Option<nlohmann::json> 
ConversationModel::format_question(const std::string& message, const nlohmann::json& model_config) { const std::string model_namespace = get_model_namespace(model_config["model_name"].get<std::string>()); if(model_namespace == "openai") { return OpenAIConversationModel::format_question(message); } else if(model_namespace == "cloudflare") { return CFConversationModel::format_question(message); } else if(model_namespace == "vllm") { return vLLMConversationModel::format_question(message); } return Option<nlohmann::json>(400, "Model namespace " + model_namespace + " is not supported."); } Option<nlohmann::json> ConversationModel::format_answer(const std::string& message, const nlohmann::json& model_config) { const std::string model_namespace = get_model_namespace(model_config["model_name"].get<std::string>()); if(model_namespace == "openai") { return OpenAIConversationModel::format_answer(message); } else if(model_namespace == "cloudflare") { return CFConversationModel::format_answer(message); } else if(model_namespace == "vllm") { return vLLMConversationModel::format_answer(message); } return Option<nlohmann::json>(400, "Model namespace " + model_namespace + " is not supported."); } Option<size_t> ConversationModel::get_minimum_required_bytes(const nlohmann::json& model_config) { const std::string model_namespace = get_model_namespace(model_config["model_name"].get<std::string>()); if(model_namespace == "openai") { return Option<size_t>(OpenAIConversationModel::get_minimum_required_bytes()); } else if(model_namespace == "cloudflare") { return Option<size_t>(CFConversationModel::get_minimum_required_bytes()); } else if(model_namespace == "vllm") { return Option<size_t>(vLLMConversationModel::get_minimum_required_bytes()); } return Option<size_t>(400, "Model namespace " + model_namespace + " is not supported."); } Option<bool> OpenAIConversationModel::validate_model(const nlohmann::json& model_config) { if(model_config.count("api_key") == 0) { return Option<bool>(400, 
"API key is not provided"); } if(!model_config["api_key"].is_string()) { return Option<bool>(400, "API key is not a string"); } std::unordered_map<std::string, std::string> headers; std::map<std::string, std::string> res_headers; headers["Authorization"] = "Bearer " + model_config["api_key"].get<std::string>(); headers["Content-Type"] = "application/json"; std::string res; auto res_code = RemoteEmbedder::call_remote_api("GET", OPENAI_LIST_MODELS, "", res, res_headers, headers); if(res_code == 408) { return Option<bool>(408, "OpenAI API timeout."); } nlohmann::json models_json; try { models_json = nlohmann::json::parse(res); } catch (const std::exception& e) { return Option<bool>(400, "Error parsing OpenAI API response: " + res); } if(res_code != 200) { if(models_json.count("error") == 0 || models_json["error"].count("message") == 0) { return Option<bool>(400, "OpenAI API error, response: " + res); } return Option<bool>(400, "OpenAI API error: " + models_json["error"]["message"].get<std::string>()); } // extract model name by removing "openai/" prefix auto model_name_without_namespace = EmbedderManager::get_model_name_without_namespace( model_config["model_name"].get<std::string>()); bool found = false; for (auto& model : models_json["data"]) { if (model["id"] == model_name_without_namespace) { found = true; break; } } if(!found) { return Option<bool>(400, "Property `model_name` is not a valid OpenAI model."); } nlohmann::json req_body; req_body["model"] = model_name_without_namespace; req_body["messages"] = R"([ { "role":"user", "content":"hello" } ])"_json; std::string chat_res; res_code = RemoteEmbedder::call_remote_api("POST", OPENAI_CHAT_COMPLETION, req_body.dump(), chat_res, res_headers, headers); if(res_code == 408) { return Option<bool>(408, "OpenAI API timeout."); } if (res_code != 200) { nlohmann::json json_res; try { json_res = nlohmann::json::parse(chat_res); } catch (const std::exception& e) { return Option<bool>(400, "OpenAI API error: " + chat_res); } 
if(json_res.count("error") == 0 || json_res["error"].count("message") == 0) { return Option<bool>(400, "OpenAI API error: " + chat_res); } return Option<bool>(400, "OpenAI API error: " + json_res["error"]["message"].get<std::string>()); } return Option<bool>(true); } Option<std::string> OpenAIConversationModel::get_answer(const std::string& context, const std::string& prompt, const std::string& system_prompt, const nlohmann::json& model_config) { const std::string model_name = EmbedderManager::get_model_name_without_namespace(model_config["model_name"].get<std::string>()); const std::string api_key = model_config["api_key"].get<std::string>(); std::unordered_map<std::string, std::string> headers; std::map<std::string, std::string> res_headers; headers["Authorization"] = "Bearer " + api_key; headers["Content-Type"] = "application/json"; nlohmann::json req_body; req_body["model"] = model_name; req_body["messages"] = nlohmann::json::array(); if(!system_prompt.empty()) { nlohmann::json system_message = nlohmann::json::object(); system_message["role"] = "system"; system_message["content"] = system_prompt; req_body["messages"].push_back(system_message); } nlohmann::json message = nlohmann::json::object(); message["role"] = "user"; message["content"] = DATA_STR + context + QUESTION_STR + prompt + ANSWER_STR; req_body["messages"].push_back(message); std::string res; auto res_code = RemoteEmbedder::call_remote_api("POST", OPENAI_CHAT_COMPLETION, req_body.dump(), res, res_headers, headers); if(res_code == 408) { throw Option<std::string>(400, "OpenAI API timeout."); } if (res_code != 200) { nlohmann::json json_res; try { json_res = nlohmann::json::parse(res); } catch (const std::exception& e) { throw Option<std::string>(400, "OpenAI API error: " + res); } if(json_res.count("error") == 0 || json_res["error"].count("message") == 0) { throw Option<std::string>(400, "OpenAI API error: " + res); } throw Option<std::string>(400, "OpenAI API error: " + 
nlohmann::json::parse(res)["error"]["message"].get<std::string>()); } nlohmann::json json_res; try { json_res = nlohmann::json::parse(res); if(json_res.count("choices") == 0 || json_res["choices"].size() == 0) { return Option<std::string>(400, "Got malformed response from OpenAI API."); } if(json_res["choices"][0].count("message") == 0 || json_res["choices"][0]["message"].count("content") == 0) { return Option<std::string>(400, "Got malformed response from OpenAI API."); } } catch (const std::exception& e) { throw Option<std::string>(400, "Got malformed response from OpenAI API."); } return Option<std::string>(json_res["choices"][0]["message"]["content"].get<std::string>()); } Option<std::string> OpenAIConversationModel::get_standalone_question(const nlohmann::json& conversation_history, const std::string& question, const nlohmann::json& model_config) { const size_t min_required_bytes = CONVERSATION_HISTORY.size() + QUESTION.size() + STANDALONE_QUESTION_PROMPT.size() + question.size(); if(model_config["max_bytes"].get<size_t>() < min_required_bytes) { return Option<std::string>(400, "Max bytes is not enough to generate standalone question."); } const std::string model_name = EmbedderManager::get_model_name_without_namespace(model_config["model_name"].get<std::string>()); const std::string api_key = model_config["api_key"].get<std::string>(); std::unordered_map<std::string, std::string> headers; std::map<std::string, std::string> res_headers; headers["Authorization"] = "Bearer " + api_key; headers["Content-Type"] = "application/json"; nlohmann::json req_body; req_body["model"] = model_name; req_body["messages"] = nlohmann::json::array(); std::string res; std::string standalone_question = STANDALONE_QUESTION_PROMPT; standalone_question += "\n\n<Conversation history>\n"; auto conversation = conversation_history["conversation"]; auto max_conversation_length = model_config["max_bytes"].get<size_t>() - min_required_bytes; auto truncate_conversation_op = 
ConversationManager::get_instance().truncate_conversation(conversation, max_conversation_length); if(!truncate_conversation_op.ok()) { return Option<std::string>(400, truncate_conversation_op.error()); } auto truncated_conversation = truncate_conversation_op.get(); for(auto& message : truncated_conversation) { if(message.count("user") == 0 && message.count("assistant") == 0) { return Option<std::string>(400, "Conversation history is not valid"); } standalone_question += message.dump(0) + "\n"; } standalone_question += "\n\n<Question>\n" + question; standalone_question += "\n\n<Standalone question>\n"; nlohmann::json message = nlohmann::json::object(); message["role"] = "user"; message["content"] = standalone_question; req_body["messages"].push_back(message); auto res_code = RemoteEmbedder::call_remote_api("POST", OPENAI_CHAT_COMPLETION, req_body.dump(), res, res_headers, headers); if(res_code == 408) { return Option<std::string>(400, "OpenAI API timeout."); } if (res_code != 200) { nlohmann::json json_res; try { json_res = nlohmann::json::parse(res); } catch (const std::exception& e) { return Option<std::string>(400, "OpenAI API error: " + res); } if(json_res.count("error") == 0 || json_res["error"].count("message") == 0) { return Option<std::string>(400, "OpenAI API error: " + res); } return Option<std::string>(400, "OpenAI API error: " + nlohmann::json::parse(res)["error"]["message"].get<std::string>()); } nlohmann::json json_res; try { json_res = nlohmann::json::parse(res); if(json_res.count("choices") == 0 || json_res["choices"].size() == 0) { return Option<std::string>(400, "Got malformed response from OpenAI API."); } if(json_res["choices"][0].count("message") == 0 || json_res["choices"][0]["message"].count("content") == 0) { return Option<std::string>(400, "Got malformed response from OpenAI API."); } } catch (const std::exception& e) { return Option<std::string>(400, "Got malformed response from OpenAI API."); } return 
Option<std::string>(json_res["choices"][0]["message"]["content"].get<std::string>()); } Option<nlohmann::json> OpenAIConversationModel::format_question(const std::string& message) { nlohmann::json json = nlohmann::json::object(); json["user"] = message; return Option<nlohmann::json>(json); } Option<nlohmann::json> OpenAIConversationModel::format_answer(const std::string& message) { nlohmann::json json = nlohmann::json::object(); json["assistant"] = message; return Option<nlohmann::json>(json); } const std::string CFConversationModel::get_model_url(const std::string& model_name, const std::string& account_id) { return "https://api.cloudflare.com/client/v4/accounts/" + account_id + "/ai/run/" + model_name; } Option<bool> CFConversationModel::validate_model(const nlohmann::json& model_config) { if(model_config.count("api_key") == 0) { return Option<bool>(400, "API key is not provided"); } if(!model_config["api_key"].is_string()) { return Option<bool>(400, "API key is not a string"); } if(model_config.count("account_id") == 0) { return Option<bool>(400, "Account ID is not provided"); } if(!model_config["account_id"].is_string()) { return Option<bool>(400, "Account ID is not a string"); } auto model_name = EmbedderManager::get_model_name_without_namespace(model_config["model_name"].get<std::string>()); bool found = false; std::unordered_map<std::string, std::string> headers; std::map<std::string, std::string> res_headers; headers["Authorization"] = "Bearer " + model_config["api_key"].get<std::string>(); headers["Content-Type"] = "application/json"; std::string res; auto url = get_model_url(model_name, model_config["account_id"].get<std::string>()); nlohmann::json req_body; req_body["messages"] = R"([ { "role":"user", "content":"hello" } ])"_json; std::string chat_res; auto res_code = RemoteEmbedder::call_remote_api("POST", url, req_body.dump(), chat_res, res_headers, headers); if(res_code == 408) { return Option<bool>(408, "Cloudflare API timeout."); } if (res_code != 
200) { nlohmann::json json_res; try { json_res = nlohmann::json::parse(chat_res); } catch (const std::exception& e) { return Option<bool>(400, "Cloudflare API error: " + chat_res); } if(json_res.count("errors") == 0 || json_res["errors"].size() == 0) { return Option<bool>(400, "Cloudflare API error: " + chat_res); } json_res = json_res["errors"][0]; return Option<bool>(400, "Cloudflare API error: " + json_res["message"].get<std::string>()); } return Option<bool>(true); } Option<std::string> CFConversationModel::get_answer(const std::string& context, const std::string& prompt, const std::string& system_prompt, const nlohmann::json& model_config) { const std::string model_name = EmbedderManager::get_model_name_without_namespace(model_config["model_name"].get<std::string>()); const std::string api_key = model_config["api_key"].get<std::string>(); const std::string account_id = model_config["account_id"].get<std::string>(); std::unordered_map<std::string, std::string> headers; std::map<std::string, std::string> res_headers; headers["Authorization"] = "Bearer " + api_key; headers["Content-Type"] = "application/json"; nlohmann::json req_body; req_body["stream"] = true; req_body["messages"] = nlohmann::json::array(); if(!system_prompt.empty()) { nlohmann::json system_message = nlohmann::json::object(); system_message["role"] = "system"; system_message["content"] = system_prompt; req_body["messages"].push_back(system_message); } nlohmann::json message = nlohmann::json::object(); message["role"] = "user"; message["content"] = CONTEXT_INFO + SPLITTER_STR + context + QUERY_STR + prompt + ANSWER_STR; req_body["messages"].push_back(message); std::string res; auto url = get_model_url(model_name, account_id); auto res_code = RemoteEmbedder::call_remote_api("POST_STREAM", url, req_body.dump(), res, res_headers, headers); if(res_code == 408) { return Option<std::string>(400, "Cloudflare API timeout."); } if (res_code != 200) { nlohmann::json json_res; try { json_res = 
nlohmann::json::parse(res); if(json_res.count("response") == 0 || json_res["response"].size() == 0) { return Option<std::string>(400, "Cloudflare API error: " + res); } json_res = nlohmann::json::parse(json_res["response"][0].get<std::string>()); } catch (const std::exception& e) { throw Option<std::string>(400, "Cloudflare API error: " + res); } if(json_res.count("errors") == 0 || json_res["errors"].size() == 0) { return Option<std::string>(400, "Cloudflare API error: " + json_res.dump(0)); } json_res = json_res["errors"][0]; return Option<std::string>(400, "Cloudflare API error: " + json_res["message"].get<std::string>()); } return parse_stream_response(res); } Option<std::string> CFConversationModel::get_standalone_question(const nlohmann::json& conversation_history, const std::string& question, const nlohmann::json& model_config) { const size_t min_required_bytes = CONVERSATION_HISTORY.size() + QUESTION.size() + STANDALONE_QUESTION_PROMPT.size() + question.size(); if(model_config["max_bytes"].get<size_t>() < min_required_bytes) { return Option<std::string>(400, "Max bytes is not enough to generate standalone question."); } const std::string model_name = EmbedderManager::get_model_name_without_namespace(model_config["model_name"].get<std::string>()); const std::string api_key = model_config["api_key"].get<std::string>(); const std::string account_id = model_config["account_id"].get<std::string>(); std::unordered_map<std::string, std::string> headers; std::map<std::string, std::string> res_headers; headers["Authorization"] = "Bearer " + api_key; headers["Content-Type"] = "application/json"; nlohmann::json req_body; req_body["stream"] = true; req_body["messages"] = nlohmann::json::array(); std::string res; std::string standalone_question = STANDALONE_QUESTION_PROMPT; auto conversation = conversation_history["conversation"]; auto max_conversation_length = model_config["max_bytes"].get<size_t>() - min_required_bytes; auto truncate_conversation_op = 
ConversationManager::get_instance().truncate_conversation(conversation, max_conversation_length); if(!truncate_conversation_op.ok()) { return Option<std::string>(400, "Conversation history is not valid"); } auto truncated_conversation = truncate_conversation_op.get(); for(auto& message : truncated_conversation) { if(message.count("user") == 0 && message.count("assistant") == 0) { return Option<std::string>(400, "Conversation history is not valid"); } standalone_question += message.dump(0) + "\n"; } standalone_question += "\n\n<Question>\n" + question; standalone_question += "\n\n<Standalone question>\n"; nlohmann::json message = nlohmann::json::object(); message["role"] = "user"; message["content"] = standalone_question; req_body["messages"].push_back(message); auto url = get_model_url(model_name, account_id); auto res_code = RemoteEmbedder::call_remote_api("POST_STREAM", url, req_body.dump(), res, res_headers, headers); if(res_code == 408) { return Option<std::string>(400, "Cloudflare API timeout."); } if (res_code != 200) { nlohmann::json json_res; try { json_res = nlohmann::json::parse(res); json_res = nlohmann::json::parse(json_res["response"].get<std::string>()); } catch (const std::exception& e) { return Option<std::string>(400, "Cloudflare API error: " + res); } if(json_res.count("errors") == 0 || json_res["errors"].size() == 0) { return Option<std::string>(400, "Cloudflare API error: " + json_res.dump(0)); } json_res = json_res["errors"][0]; return Option<std::string>(400, "Cloudflare API error: " + json_res["message"].get<std::string>()); } return parse_stream_response(res); } Option<nlohmann::json> CFConversationModel::format_question(const std::string& message) { nlohmann::json json = nlohmann::json::object(); json["user"] = message; return Option<nlohmann::json>(json); } Option<nlohmann::json> CFConversationModel::format_answer(const std::string& message) { nlohmann::json json = nlohmann::json::object(); json["assistant"] = message; return 
Option<nlohmann::json>(json); } Option<std::string> CFConversationModel::parse_stream_response(const std::string& res) { try { auto json_res = nlohmann::json::parse(res); std::string parsed_response = ""; std::vector<std::string> lines = json_res["response"].get<std::vector<std::string>>(); std::regex data_regex("data: (.*?)\\n\\n"); for(auto& line : lines) { auto begin = std::sregex_iterator(line.begin(), line.end(), data_regex); auto end = std::sregex_iterator(); for (std::sregex_iterator i = begin; i != end; ++i) { std::string substr_line = i->str().substr(6, i->str().size() - 8); if(substr_line.find("[DONE]") != std::string::npos) { break; } nlohmann::json json_line; json_line = nlohmann::json::parse(substr_line); parsed_response += json_line["response"]; } } return Option<std::string>(parsed_response); } catch (const std::exception& e) { LOG(ERROR) << e.what(); LOG(ERROR) << "Response: " << res; return Option<std::string>(400, "Got malformed response from Cloudflare API."); } } Option<bool> vLLMConversationModel::validate_model(const nlohmann::json& model_config) { if(model_config.count("vllm_url") == 0) { return Option<bool>(400, "vLLM URL is not provided"); } if(!model_config["vllm_url"].is_string()) { return Option<bool>(400, "vLLM URL is not a string"); } std::unordered_map<std::string, std::string> headers; std::map<std::string, std::string> res_headers; std::string res; if(model_config.count("api_key") != 0) { if(!model_config["api_key"].is_string()) { return Option<bool>(400, "API key is not a string"); } headers["Authorization"] = "Bearer " + model_config["api_key"].get<std::string>(); } auto res_code = RemoteEmbedder::call_remote_api("GET", get_list_models_url(model_config["vllm_url"]), "", res, res_headers, headers); if(res_code == 408) { return Option<bool>(408, "vLLM API timeout."); } if (res_code != 200) { nlohmann::json json_res; try { json_res = nlohmann::json::parse(res); } catch (const std::exception& e) { return Option<bool>(400, "vLLM API 
error: " + res); } if(json_res.count("message") == 0) { return Option<bool>(400, "vLLM API error: " + res); } return Option<bool>(400, "vLLM API error: " + nlohmann::json::parse(res)["message"].get<std::string>()); } nlohmann::json models_json; try { models_json = nlohmann::json::parse(res); } catch (const std::exception& e) { return Option<bool>(400, "Got malformed response from vLLM API."); } bool found = false; // extract model name by removing "vLLM/" prefix auto model_name_without_namespace = EmbedderManager::get_model_name_without_namespace(model_config["model_name"].get<std::string>()); for (auto& model : models_json["data"]) { if (model["id"] == model_name_without_namespace) { found = true; break; } } if(!found) { return Option<bool>(400, "Property `model_name` is not a valid vLLM model."); } nlohmann::json req_body; headers["Content-Type"] = "application/json"; req_body["model"] = model_name_without_namespace; req_body["messages"] = R"([ { "role":"user", "content":"hello" } ])"_json; std::string chat_res; res_code = RemoteEmbedder::call_remote_api("POST", get_chat_completion_url(model_config["vllm_url"]), req_body.dump(-1), chat_res, res_headers, headers); if(res_code == 408) { return Option<bool>(408, "vLLM API timeout."); } if (res_code != 200) { nlohmann::json json_res; try { json_res = nlohmann::json::parse(res); } catch (const std::exception& e) { return Option<bool>(400, "vLLM API error: " + res); } if(json_res.count("message") == 0) { return Option<bool>(400, "vLLM API error: " + res); } return Option<bool>(400, "vLLM API error: " + nlohmann::json::parse(res)["message"].get<std::string>()); } return Option<bool>(true); } Option<std::string> vLLMConversationModel::get_answer(const std::string& context, const std::string& prompt, const std::string& system_prompt, const nlohmann::json& model_config) { const std::string model_name = EmbedderManager::get_model_name_without_namespace(model_config["model_name"].get<std::string>()); const std::string 
vllm_url = model_config["vllm_url"].get<std::string>(); std::unordered_map<std::string, std::string> headers; std::map<std::string, std::string> res_headers; headers["Content-Type"] = "application/json"; nlohmann::json req_body; req_body["model"] = model_name; req_body["messages"] = nlohmann::json::array(); if(!system_prompt.empty()) { nlohmann::json system_message = nlohmann::json::object(); system_message["role"] = "system"; system_message["content"] = system_prompt; req_body["messages"].push_back(system_message); } nlohmann::json message = nlohmann::json::object(); message["role"] = "user"; message["content"] = DATA_STR + context + QUESTION_STR + prompt + ANSWER_STR; req_body["messages"].push_back(message); std::string res; if(model_config.count("api_key") != 0) { headers["Authorization"] = "Bearer " + model_config["api_key"].get<std::string>(); } auto res_code = RemoteEmbedder::call_remote_api("POST", get_chat_completion_url(vllm_url), req_body.dump(), res, res_headers, headers); if(res_code == 408) { throw Option<std::string>(400, "vLLM API timeout."); } if (res_code != 200) { nlohmann::json json_res; try { json_res = nlohmann::json::parse(res); } catch (const std::exception& e) { return Option<std::string>(400, "vLLM API error: " + res); } if(json_res.count("message") == 0) { return Option<std::string>(400, "vLLM API error: " + res); } return Option<std::string>(400, "vLLM API error: " + nlohmann::json::parse(res)["message"].get<std::string>()); } nlohmann::json json_res; try { json_res = nlohmann::json::parse(res); if(json_res.count("choices") == 0 || json_res["choices"].size() == 0) { return Option<std::string>(400, "Got malformed response from OpenAI API."); } if(json_res["choices"][0].count("message") == 0 || json_res["choices"][0]["message"].count("content") == 0) { return Option<std::string>(400, "Got malformed response from OpenAI API."); } } catch (const std::exception& e) { throw Option<std::string>(400, "Got malformed response from vLLM API."); } 
return Option<std::string>(json_res["choices"][0]["message"]["content"].get<std::string>()); } Option<std::string> vLLMConversationModel::get_standalone_question(const nlohmann::json& conversation_history, const std::string& question, const nlohmann::json& model_config) { const size_t min_required_bytes = CONVERSATION_HISTORY.size() + QUESTION.size() + STANDALONE_QUESTION_PROMPT.size() + question.size(); if(model_config["max_bytes"].get<size_t>() < min_required_bytes) { return Option<std::string>(400, "Max bytes is not enough to generate standalone question."); } const std::string model_name = EmbedderManager::get_model_name_without_namespace(model_config["model_name"].get<std::string>()); const std::string vllm_url = model_config["vllm_url"].get<std::string>(); std::unordered_map<std::string, std::string> headers; std::map<std::string, std::string> res_headers; headers["Content-Type"] = "application/json"; nlohmann::json req_body; req_body["model"] = model_name; req_body["messages"] = nlohmann::json::array(); std::string res; std::string standalone_question = STANDALONE_QUESTION_PROMPT; auto conversation = conversation_history["conversation"]; auto max_conversation_length = model_config["max_bytes"].get<size_t>() - min_required_bytes; auto truncate_conversation_op = ConversationManager::get_instance().truncate_conversation(conversation, max_conversation_length); if(!truncate_conversation_op.ok()) { return Option<std::string>(400, "Conversation history is not valid"); } auto truncated_conversation = truncate_conversation_op.get(); for(auto& message : truncated_conversation) { if(message.count("user") == 0 && message.count("assistant") == 0) { return Option<std::string>(400, "Conversation history is not valid"); } standalone_question += message.dump(0) + "\n"; } standalone_question += "\n\n<Question>\n" + question; standalone_question += "\n\n<Standalone question>\n"; nlohmann::json message = nlohmann::json::object(); message["role"] = "user"; message["content"] = 
standalone_question; req_body["messages"].push_back(message); if(model_config.count("api_key") != 0) { headers["Authorization"] = "Bearer " + model_config["api_key"].get<std::string>(); } auto res_code = RemoteEmbedder::call_remote_api("POST", get_chat_completion_url(vllm_url), req_body.dump(), res, res_headers, headers); if(res_code == 408) { return Option<std::string>(400, "vLLM API timeout."); } if (res_code != 200) { nlohmann::json json_res; try { json_res = nlohmann::json::parse(res); } catch (const std::exception& e) { return Option<std::string>(400, "vLLM API error: " + res); } if(json_res.count("message") == 0) { return Option<std::string>(400, "vLLM API error: " + res); } return Option<std::string>(400, "vLLM API error: " + nlohmann::json::parse(res)["message"].get<std::string>()); } nlohmann::json json_res; try { json_res = nlohmann::json::parse(res); if(json_res.count("choices") == 0 || json_res["choices"].size() == 0) { return Option<std::string>(400, "Got malformed response from OpenAI API."); } if(json_res["choices"][0].count("message") == 0 || json_res["choices"][0]["message"].count("content") == 0) { return Option<std::string>(400, "Got malformed response from OpenAI API."); } } catch (const std::exception& e) { return Option<std::string>(400, "Got malformed response from vLLM API."); } return Option<std::string>(json_res["choices"][0]["message"]["content"].get<std::string>()); } Option<nlohmann::json> vLLMConversationModel::format_question(const std::string& message) { nlohmann::json json = nlohmann::json::object(); json["user"] = message; return Option<nlohmann::json>(json); } Option<nlohmann::json> vLLMConversationModel::format_answer(const std::string& message) { nlohmann::json json = nlohmann::json::object(); json["assistant"] = message; return Option<nlohmann::json>(json); } const std::string vLLMConversationModel::get_list_models_url(const std::string& vllm_url) { return vllm_url.back() == '/' ? 
vllm_url + "v1/models" : vllm_url + "/v1/models"; } const std::string vLLMConversationModel::get_chat_completion_url(const std::string& vllm_url) { return vllm_url.back() == '/' ? vllm_url + "v1/chat/completions" : vllm_url + "/v1/chat/completions"; }
36,533
C++
.cpp
708
44.186441
178
0.632759
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,781
housekeeper.cpp
typesense_typesense/src/housekeeper.cpp
#include <map> #include <collection_manager.h> #include <system_metrics.h> #include "housekeeper.h" void HouseKeeper::run() { uint64_t prev_remove_expired_keys_s = std::chrono::duration_cast<std::chrono::seconds>( std::chrono::system_clock::now().time_since_epoch()).count(); uint64_t prev_db_compaction_s = std::chrono::duration_cast<std::chrono::seconds>( std::chrono::system_clock::now().time_since_epoch()).count(); uint64_t prev_memory_usage_s = std::chrono::duration_cast<std::chrono::seconds>( std::chrono::system_clock::now().time_since_epoch()).count(); while(!quit) { std::unique_lock lk(mutex); cv.wait_for(lk, std::chrono::milliseconds(3050), [&] { return quit.load(); }); if(quit) { lk.unlock(); break; } auto now_ts_seconds = std::chrono::duration_cast<std::chrono::seconds>( std::chrono::system_clock::now().time_since_epoch()).count(); // update system memory usage if (now_ts_seconds - prev_memory_usage_s >= memory_usage_interval_s) { active_memory_used = SystemMetrics::get_memory_active_bytes(); prev_memory_usage_s = now_ts_seconds; log_bad_queries(); } // perform compaction on underlying store if enabled if(Config::get_instance().get_db_compaction_interval() > 0) { if(now_ts_seconds - prev_db_compaction_s >= Config::get_instance().get_db_compaction_interval()) { LOG(INFO) << "Starting DB compaction."; CollectionManager::get_instance().get_store()->compact_all(); LOG(INFO) << "Finished DB compaction."; prev_db_compaction_s = std::chrono::duration_cast<std::chrono::seconds>( std::chrono::system_clock::now().time_since_epoch()).count(); } } if (now_ts_seconds - prev_remove_expired_keys_s >= remove_expired_keys_interval_s) { // Do housekeeping for authmanager CollectionManager::get_instance().getAuthManager().do_housekeeping(); prev_remove_expired_keys_s = std::chrono::duration_cast<std::chrono::seconds>( std::chrono::system_clock::now().time_since_epoch()).count(); } lk.unlock(); } } void HouseKeeper::stop() { quit = true; cv.notify_all(); } void 
HouseKeeper::init() { } uint64_t HouseKeeper::get_active_memory_used() { return active_memory_used; } void HouseKeeper::add_req(const std::shared_ptr<http_req>& req) { std::unique_lock ifq_lock(ifq_mutex); in_flight_queries.emplace(req->start_ts, req_metadata_t(req, get_active_memory_used())); } void HouseKeeper::remove_req(uint64_t req_id) { std::unique_lock ifq_lock(ifq_mutex); in_flight_queries.erase(req_id); } std::string HouseKeeper::get_query_log(const std::shared_ptr<http_req>& req) { std::string search_payload = req->body; StringUtils::erase_char(search_payload, '\n'); std::string query_string = "?"; for(const auto& param_kv: req->params) { if(param_kv.first != http_req::AUTH_HEADER && param_kv.first != http_req::USER_HEADER) { query_string += param_kv.first + "=" + param_kv.second + "&"; } } return std::string("id=") + std::to_string(req->start_ts) + ", qs=" + query_string + ", body=" + search_payload; } void HouseKeeper::log_running_queries() { std::unique_lock ifq_lock(ifq_mutex); if(in_flight_queries.empty()) { LOG(INFO) << "No in-flight search queries were found."; return ; } LOG(INFO) << "Dump of in-flight search queries:"; for(const auto& kv: in_flight_queries) { LOG(INFO) << get_query_log(kv.second.req); } } void HouseKeeper::log_bad_queries() { std::unique_lock ifq_lock(ifq_mutex); auto now_ts_seconds = std::chrono::duration_cast<std::chrono::seconds>( std::chrono::system_clock::now().time_since_epoch()).count(); for(auto& kv: in_flight_queries) { auto req_ts = kv.first; if(now_ts_seconds - req_ts < memory_req_min_age_s) { // since we use a map it's already ordered ascending on timestamp break; } if(kv.second.already_logged) { continue; } // query that's atleast 10 seconds old: check if memory difference exceeds 1 GB int64_t memory_req_start = kv.second.active_memory; int64_t curr_memory = active_memory_used; int64_t memory_diff = curr_memory - memory_req_start; const int64_t one_gb = 1073741824; if(memory_diff > one_gb) { LOG(INFO) << "Detected 
bad query, start_ts: " << req_ts << ", memory_diff: " << memory_diff << ", " << get_query_log(kv.second.req); kv.second.already_logged = true; } } }
4,904
C++
.cpp
109
36.889908
116
0.608559
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,782
file_utils.cpp
typesense_typesense/src/file_utils.cpp
#include <butil/file_util.h> #include <butil/files/file_enumerator.h> #include <butil/string_printf.h> #include <file_utils.h> bool directory_exists(const std::string& dir_path) { struct stat info; return stat(dir_path.c_str(), &info) == 0 && (info.st_mode & S_IFDIR); } bool create_directory(const std::string& dir_path) { return butil::CreateDirectory(butil::FilePath(dir_path)); } bool file_exists(const std::string & file_path) { struct stat info; return stat(file_path.c_str(), &info) == 0 && !(info.st_mode & S_IFDIR); } // tries to hard link first bool copy_dir(const std::string& from_path, const std::string& to_path) { struct stat from_stat; if (stat(from_path.c_str(), &from_stat) < 0 || !S_ISDIR(from_stat.st_mode)) { LOG(WARNING) << "stat " << from_path << " failed"; return false; } if (!butil::CreateDirectory(butil::FilePath(to_path))) { LOG(WARNING) << "CreateDirectory " << to_path << " failed"; return false; } butil::FileEnumerator dir_enum(butil::FilePath(from_path),false, butil::FileEnumerator::FILES); for (butil::FilePath name = dir_enum.Next(); !name.empty(); name = dir_enum.Next()) { std::string src_file(from_path); std::string dst_file(to_path); butil::string_appendf(&src_file, "/%s", name.BaseName().value().c_str()); butil::string_appendf(&dst_file, "/%s", name.BaseName().value().c_str()); if (0 != link(src_file.c_str(), dst_file.c_str())) { if (!butil::CopyFile(butil::FilePath(src_file), butil::FilePath(dst_file))) { LOG(WARNING) << "copy " << src_file << " to " << dst_file << " failed"; return false; } } } return true; } bool mv_dir(const std::string& from_path, const std::string& to_path) { struct stat from_stat; if (stat(from_path.c_str(), &from_stat) < 0 || !S_ISDIR(from_stat.st_mode)) { LOG(WARNING) << "stat " << from_path << " failed"; return false; } if (!butil::CreateDirectory(butil::FilePath(to_path))) { LOG(WARNING) << "CreateDirectory " << to_path << " failed"; return false; } butil::FileEnumerator file_enum(butil::FilePath(from_path), 
false, butil::FileEnumerator::FILES | butil::FileEnumerator::DIRECTORIES); for (butil::FilePath name = file_enum.Next(); !name.empty(); name = file_enum.Next()) { std::string src_file(from_path); std::string dst_file(to_path); if(name.value() == to_path) { // handle edge case when moving a directory into a subdirectory continue; } butil::string_appendf(&src_file, "/%s", name.BaseName().value().c_str()); butil::string_appendf(&dst_file, "/%s", name.BaseName().value().c_str()); butil::File::Error error; if (!butil::ReplaceFile(butil::FilePath(src_file), butil::FilePath(dst_file), &error)) { LOG(WARNING) << "move " << src_file << " to " << dst_file << " failed: " << error; return false; } } return true; } bool rename_path(const std::string& from_path, const std::string& to_path) { return butil::Move(butil::FilePath(from_path), butil::FilePath(to_path)); } bool delete_path(const std::string& path, bool recursive) { return butil::DeleteFile(butil::FilePath(path), recursive); } bool dir_enum_count(const std::string &path) { size_t count = 0; butil::FileEnumerator file_enum(butil::FilePath(path), false, butil::FileEnumerator::FILES | butil::FileEnumerator::DIRECTORIES); for (butil::FilePath name = file_enum.Next(); !name.empty(); name = file_enum.Next()) { count++; } return count; }
3,871
C++
.cpp
85
37.729412
109
0.595112
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,783
join.cpp
typesense_typesense/src/join.cpp
#include "join.h" #include <collection_manager.h> #include "collection.h" #include "logger.h" #include <timsort.hpp> Option<bool> single_value_filter_query(nlohmann::json& document, const std::string& field_name, const std::string& ref_field_type, std::string& filter_value) { auto const& json_value = document[field_name]; if (json_value.is_null()) { return Option<bool>(422, "Field `" + field_name + "` cannot have `null` value."); } if (json_value.is_string() && ref_field_type == field_types::STRING) { std::string value = json_value.get<std::string>(); if (value.empty()) { return Option<bool>(400, "Error with field `" + field_name + "`: Value cannot be empty."); } // Special symbols are ignored when enclosed inside backticks. bool is_backtick_present = false; bool special_symbols_present = false; bool in_backtick = false; auto const size = value.size(); for (size_t i = 0; i < size; i++) { auto c = value[i]; if (c == '`') { in_backtick = !in_backtick; is_backtick_present = true; } else if (!in_backtick && (c == '(' || c == ')' || (c == '&' && i + 1 < size && value[i + 1] == '&') || (c == '|' && i + 1 < size && value[i + 1] == '|'))) { special_symbols_present = true; if (is_backtick_present) { break; } } } if (is_backtick_present && special_symbols_present) { // Value containing special symbols cannot be parsed. 
return Option<bool>(400, "Filter value `" + value + "` cannot be parsed."); } else if (!is_backtick_present) { value = "`" + json_value.get<std::string>() + "`"; } filter_value += value; } else if (json_value.is_number_integer() && (ref_field_type == field_types::INT64 || (ref_field_type == field_types::INT32 && StringUtils::is_int32_t(std::to_string(json_value.get<int64_t>()))))) { filter_value += std::to_string(json_value.get<int64_t>()); } else { return Option<bool>(400, "Field `" + field_name + "` must have `" + ref_field_type + "` value."); } return Option<bool>(true); } Option<bool> Join::add_reference_helper_fields(nlohmann::json& document, const tsl::htrie_map<char, field>& schema, const spp::sparse_hash_map<std::string, reference_info_t>& reference_fields, tsl::htrie_set<char>& object_reference_helper_fields, const bool& is_update) { tsl::htrie_set<char> flat_fields; if (!reference_fields.empty() && document.contains(".flat")) { for (const auto &item: document[".flat"].get<std::vector<std::string>>()) { flat_fields.insert(item); } } // Add reference helper fields in the document. for (auto const& pair: reference_fields) { auto field_name = pair.first; auto const reference_helper_field = field_name + fields::REFERENCE_HELPER_FIELD_SUFFIX; auto const& field = schema.at(field_name); auto const& optional = field.optional; auto const& is_async_reference = field.is_async_reference; // Strict checking for presence of non-optional reference field during indexing operation. 
auto is_required = !is_update && !optional; if (is_required && document.count(field_name) != 1) { return Option<bool>(400, "Missing the required reference field `" + field_name + "` in the document."); } else if (document.count(field_name) != 1) { if (is_update) { document[fields::reference_helper_fields] += reference_helper_field; } continue; } auto reference_pair = pair.second; auto reference_collection_name = reference_pair.collection; auto reference_field_name = reference_pair.field; auto& cm = CollectionManager::get_instance(); auto ref_collection = cm.get_collection(reference_collection_name); if (is_update && document.contains(reference_helper_field) && (!document[field_name].is_array() || document[field_name].size() == document[reference_helper_field].size())) { // No need to look up the reference collection since reference helper field is already populated. // Saves needless computation in cases where references are known beforehand. For example, when cascade // deleting the related docs. document[fields::reference_helper_fields] += reference_helper_field; continue; } if (ref_collection == nullptr && is_async_reference) { document[fields::reference_helper_fields] += reference_helper_field; if (document[field_name].is_array()) { document[reference_helper_field] = nlohmann::json::array(); // Having the same number of values makes it easier to update the references in the future. 
document[reference_helper_field].insert(document[reference_helper_field].begin(), document[field_name].size(), Index::reference_helper_sentinel_value); } else { document[reference_helper_field] = Index::reference_helper_sentinel_value; } continue; } else if (ref_collection == nullptr) { return Option<bool>(400, "Referenced collection `" + reference_collection_name + "` not found."); } bool is_object_reference_field = flat_fields.count(field_name) != 0; std::string object_key; bool is_object_array = false; if (is_object_reference_field) { object_reference_helper_fields.insert(reference_helper_field); std::vector<std::string> tokens; StringUtils::split(field_name, tokens, "."); if (schema.count(tokens[0]) == 0) { return Option<bool>(400, "Could not find `" + tokens[0] + "` object/object[] field in the schema."); } object_key = tokens[0]; is_object_array = schema.at(object_key).is_array(); } if (reference_field_name == "id") { auto id_field_type_error_op = Option<bool>(400, "Field `" + field_name + "` must have string value."); if (is_object_array) { if (!document[field_name].is_array()) { return Option<bool>(400, "Expected `" + field_name + "` to be an array."); } document[reference_helper_field] = nlohmann::json::array(); document[fields::reference_helper_fields] += reference_helper_field; std::vector<std::string> keys; StringUtils::split(field_name, keys, "."); auto const& object_array = document[keys[0]]; for (uint32_t i = 0; i < object_array.size(); i++) { if (optional && object_array[i].count(keys[1]) == 0) { continue; } else if (object_array[i].count(keys[1]) == 0) { return Option<bool>(400, "Object at index `" + std::to_string(i) + "` is missing `" + field_name + "`."); } else if (!object_array[i].at(keys[1]).is_string()) { return id_field_type_error_op; } auto id = object_array[i].at(keys[1]).get<std::string>(); auto ref_doc_id_op = ref_collection->doc_id_to_seq_id_with_lock(id); if (!ref_doc_id_op.ok() && is_async_reference) { auto const& value = 
nlohmann::json::array({i, Index::reference_helper_sentinel_value}); document[reference_helper_field] += value; } else if (!ref_doc_id_op.ok()) { return Option<bool>(400, "Referenced document having `id: " + id + "` not found in the collection `" += reference_collection_name + "`." ); } else { // Adding the index of the object along with referenced doc id to account for the scenario where a // reference field of an object array might be optional and missing. document[reference_helper_field] += nlohmann::json::array({i, ref_doc_id_op.get()}); } } } else if (document[field_name].is_array()) { document[reference_helper_field] = nlohmann::json::array(); document[fields::reference_helper_fields] += reference_helper_field; for (const auto &item: document[field_name].items()) { if (optional && item.value().is_null()) { continue; } else if (!item.value().is_string()) { return id_field_type_error_op; } auto id = item.value().get<std::string>(); auto ref_doc_id_op = ref_collection->doc_id_to_seq_id_with_lock(id); if (!ref_doc_id_op.ok() && is_async_reference) { document[reference_helper_field] += Index::reference_helper_sentinel_value; } else if (!ref_doc_id_op.ok()) { return Option<bool>(400, "Referenced document having `id: " + id + "` not found in the collection `" += reference_collection_name + "`." ); } else { document[reference_helper_field] += ref_doc_id_op.get(); } } } else if (document[field_name].is_string()) { document[fields::reference_helper_fields] += reference_helper_field; auto id = document[field_name].get<std::string>(); auto ref_doc_id_op = ref_collection->doc_id_to_seq_id_with_lock(id); if (!ref_doc_id_op.ok() && is_async_reference) { document[reference_helper_field] = Index::reference_helper_sentinel_value; } else if (!ref_doc_id_op.ok()) { return Option<bool>(400, "Referenced document having `id: " + id + "` not found in the collection `" += reference_collection_name + "`." 
); } else { document[reference_helper_field] = ref_doc_id_op.get(); } } else if (optional && document[field_name].is_null()) { // Reference helper field should also be removed along with reference field. if (is_update) { document[reference_helper_field] = nullptr; } continue; } else { return id_field_type_error_op; } continue; } if (ref_collection->get_schema().count(reference_field_name) == 0) { return Option<bool>(400, "Referenced field `" + reference_field_name + "` not found in the collection `" += reference_collection_name + "`."); } auto const ref_field = ref_collection->get_schema().at(reference_field_name); if (!ref_field.index) { return Option<bool>(400, "Referenced field `" + reference_field_name + "` in the collection `" += reference_collection_name + "` must be indexed."); } std::string ref_field_type = ref_field.is_string() ? field_types::STRING : ref_field.is_int32() ? field_types::INT32 : ref_field.is_int64() ? field_types::INT64 : field_types::NIL; if (ref_field_type == field_types::NIL) { return Option<bool>(400, "Cannot add a reference to `" + reference_collection_name + "." += reference_field_name + "` of type `" += ref_field.type + "`."); } if (is_object_array) { if (!document[field_name].is_array()) { return Option<bool>(400, "Expected `" + field_name + "` to be an array."); } document[reference_helper_field] = nlohmann::json::array(); document[fields::reference_helper_fields] += reference_helper_field; nlohmann::json temp_doc; // To store singular values of `field_name` field. 
std::vector<std::string> keys; StringUtils::split(field_name, keys, "."); auto const& object_array = document[keys[0]]; for (uint32_t i = 0; i < object_array.size(); i++) { if (optional && object_array[i].count(keys[1]) == 0) { continue; } else if (object_array[i].count(keys[1]) == 0) { return Option<bool>(400, "Object at index `" + std::to_string(i) + "` is missing `" + field_name + "`."); } temp_doc[field_name] = object_array[i].at(keys[1]); std::string filter_query = reference_field_name + ":= "; auto single_value_filter_query_op = single_value_filter_query(temp_doc, field_name, ref_field_type, filter_query); if (!single_value_filter_query_op.ok()) { if (optional && single_value_filter_query_op.code() == 422) { continue; } return Option<bool>(400, single_value_filter_query_op.error()); } filter_result_t filter_result; auto filter_ids_op = ref_collection->get_filter_ids(filter_query, filter_result); if (!filter_ids_op.ok()) { return filter_ids_op; } if (filter_result.count == 0 && is_async_reference) { document[reference_helper_field] += nlohmann::json::array({i, Index::reference_helper_sentinel_value}); } else if (filter_result.count != 1) { // Constraints similar to foreign key apply here. The reference match must be unique and not null. return Option<bool>(400, filter_result.count < 1 ? "Reference document having `" + filter_query + "` not found in the collection `" += reference_collection_name + "`." : "Multiple documents having `" + filter_query + "` found in the collection `" += reference_collection_name + "`."); } else { // Adding the index of the object along with referenced doc id to account for the scenario where a // reference field of an object array might be optional and missing. 
document[reference_helper_field] += nlohmann::json::array({i, filter_result.docs[0]}); } } continue; } auto const is_reference_array_field = field.is_array(); std::vector<std::string> filter_values; if (is_reference_array_field) { if (document[field_name].is_null()) { document[reference_helper_field] = nlohmann::json::array(); document[fields::reference_helper_fields] += reference_helper_field; continue; } else if (!document[field_name].is_array()) { return Option<bool>(400, "Expected `" + field_name + "` to be an array."); } nlohmann::json temp_doc; for (size_t i = 0; i < document[field_name].size(); i++) { temp_doc[field_name] = document[field_name].at(i); std::string value; auto single_value_filter_query_op = single_value_filter_query(temp_doc, field_name, ref_field_type, value); if (!single_value_filter_query_op.ok()) { // We don't accept null value in an array of values. No need to handle 422 code. return single_value_filter_query_op; } filter_values.emplace_back(value); } document[reference_helper_field] = nlohmann::json::array(); document[fields::reference_helper_fields] += reference_helper_field; if (filter_values.empty()) { continue; } } else { std::string value; auto single_value_filter_query_op = single_value_filter_query(document, field_name, ref_field_type, value); if (!single_value_filter_query_op.ok()) { if (optional && single_value_filter_query_op.code() == 422) { // Reference helper field should also be removed along with reference field. if (is_update) { document[reference_helper_field] = nullptr; } continue; } return Option<bool>(400, single_value_filter_query_op.error()); } filter_values.emplace_back(value); document[fields::reference_helper_fields] += reference_helper_field; } for (const auto& filter_value: filter_values) { std::string filter_query = reference_field_name + (field.is_string() ? 
":= " : ": ") += filter_value; filter_result_t filter_result; auto filter_ids_op = ref_collection->get_filter_ids(filter_query, filter_result); if (!filter_ids_op.ok()) { return filter_ids_op; } if (filter_result.count == 0 && is_async_reference) { if (is_reference_array_field) { document[reference_helper_field] += Index::reference_helper_sentinel_value; } else { document[reference_helper_field] = Index::reference_helper_sentinel_value; } } else if (filter_result.count != 1) { // Constraints similar to foreign key apply here. The reference match must be unique and not null. return Option<bool>(400, filter_result.count < 1 ? "Reference document having `" + filter_query + "` not found in the collection `" += reference_collection_name + "`." : "Multiple documents having `" + filter_query + "` found in the collection `" += reference_collection_name + "`."); } else { if (is_reference_array_field) { document[reference_helper_field] += filter_result.docs[0]; } else { document[reference_helper_field] = filter_result.docs[0]; } } } } return Option<bool>(true); } Option<bool> Join::prune_ref_doc(nlohmann::json& doc, const reference_filter_result_t& references, const tsl::htrie_set<char>& ref_include_fields_full, const tsl::htrie_set<char>& ref_exclude_fields_full, const bool& is_reference_array, const ref_include_exclude_fields& ref_include_exclude) { nlohmann::json original_doc; if (!ref_include_exclude.nested_join_includes.empty()) { original_doc = doc; } auto const& ref_collection_name = ref_include_exclude.collection_name; auto& cm = CollectionManager::get_instance(); auto ref_collection = cm.get_collection(ref_collection_name); if (ref_collection == nullptr) { return Option<bool>(400, "Referenced collection `" + ref_collection_name + "` in `include_fields` not found."); } auto const& alias = ref_include_exclude.alias; auto const& strategy = ref_include_exclude.strategy; auto error_prefix = "Referenced collection `" + ref_collection_name + "`: "; // One-to-one relation. 
if (strategy != ref_include::nest_array && !is_reference_array && references.count == 1) { auto ref_doc_seq_id = references.docs[0]; nlohmann::json ref_doc; auto get_doc_op = ref_collection->get_document_from_store(ref_doc_seq_id, ref_doc); if (!get_doc_op.ok()) { if (ref_doc_seq_id == Index::reference_helper_sentinel_value) { return Option<bool>(true); } return Option<bool>(get_doc_op.code(), error_prefix + get_doc_op.error()); } Collection::remove_flat_fields(ref_doc); Collection::remove_reference_helper_fields(ref_doc); auto prune_op = Collection::prune_doc(ref_doc, ref_include_fields_full, ref_exclude_fields_full); if (!prune_op.ok()) { return Option<bool>(prune_op.code(), error_prefix + prune_op.error()); } auto const key = alias.empty() ? ref_collection_name : alias; auto const& nest_ref_doc = (strategy == ref_include::nest); if (!ref_doc.empty()) { if (nest_ref_doc) { doc[key] = ref_doc; } else { if (!alias.empty()) { auto temp_doc = ref_doc; ref_doc.clear(); for (const auto &item: temp_doc.items()) { ref_doc[alias + item.key()] = item.value(); } } doc.update(ref_doc); } } // Include nested join references. if (!ref_include_exclude.nested_join_includes.empty()) { // Passing empty references in case the nested include collection is not joined, but it still can be included // if we have a reference to it. std::map<std::string, reference_filter_result_t> refs; auto nested_include_exclude_op = include_references(nest_ref_doc ? doc[key] : doc, ref_doc_seq_id, ref_collection.get(), references.coll_to_references == nullptr ? refs : references.coll_to_references[0], ref_include_exclude.nested_join_includes, original_doc); if (!nested_include_exclude_op.ok()) { return nested_include_exclude_op; } } return Option<bool>(true); } // One-to-many relation. 
for (uint32_t i = 0; i < references.count; i++) { auto ref_doc_seq_id = references.docs[i]; nlohmann::json ref_doc; std::string key; auto const& nest_ref_doc = (strategy == ref_include::nest || strategy == ref_include::nest_array); auto get_doc_op = ref_collection->get_document_from_store(ref_doc_seq_id, ref_doc); if (!get_doc_op.ok()) { // Referenced document is not yet indexed. if (ref_doc_seq_id == Index::reference_helper_sentinel_value) { continue; } return Option<bool>(get_doc_op.code(), error_prefix + get_doc_op.error()); } Collection::remove_flat_fields(ref_doc); Collection::remove_reference_helper_fields(ref_doc); auto prune_op = Collection::prune_doc(ref_doc, ref_include_fields_full, ref_exclude_fields_full); if (!prune_op.ok()) { return Option<bool>(prune_op.code(), error_prefix + prune_op.error()); } if (!ref_doc.empty()) { if (nest_ref_doc) { key = alias.empty() ? ref_collection_name : alias; if (doc.contains(key) && !doc[key].is_array()) { return Option<bool>(400, "Could not include the reference document of `" + ref_collection_name + "` collection. Expected `" += key + "` to be an array. Try " + (alias.empty() ? "adding an" : "renaming the") + " alias."); } doc[key] += ref_doc; } else { for (auto ref_doc_it = ref_doc.begin(); ref_doc_it != ref_doc.end(); ref_doc_it++) { auto const& ref_doc_key = ref_doc_it.key(); key = alias + ref_doc_key; if (doc.contains(key) && !doc[key].is_array()) { return Option<bool>(400, "Could not include the value of `" + ref_doc_key + "` key of the reference document of `" += ref_collection_name + "` collection. Expected `" += key + "` to be an array. Try " + (alias.empty() ? "adding an" : "renaming the") + " alias."); } // Add the values of ref_doc as JSON array into doc. doc[key] += ref_doc_it.value(); } } } // Include nested join references. 
if (!ref_include_exclude.nested_join_includes.empty()) { // Passing empty references in case the nested include collection is not joined, but it still can be included // if we have a reference to it. std::map<std::string, reference_filter_result_t> refs; auto nested_include_exclude_op = include_references(nest_ref_doc ? doc[key].at(i) : doc, ref_doc_seq_id, ref_collection.get(), references.coll_to_references == nullptr ? refs : references.coll_to_references[i], ref_include_exclude.nested_join_includes, original_doc); if (!nested_include_exclude_op.ok()) { return nested_include_exclude_op; } } } return Option<bool>(true); } Option<bool> Join::include_references(nlohmann::json& doc, const uint32_t& seq_id, Collection *const collection, const std::map<std::string, reference_filter_result_t>& reference_filter_results, const std::vector<ref_include_exclude_fields>& ref_include_exclude_fields_vec, const nlohmann::json& original_doc) { for (auto const& ref_include_exclude: ref_include_exclude_fields_vec) { auto ref_collection_name = ref_include_exclude.collection_name; auto& cm = CollectionManager::get_instance(); auto ref_collection = cm.get_collection(ref_collection_name); if (ref_collection == nullptr) { return Option<bool>(400, "Referenced collection `" + ref_collection_name + "` in `include_fields` not found."); } // `CollectionManager::get_collection` accounts for collection alias being used and provides pointer to the // original collection. ref_collection_name = ref_collection->get_name(); auto const joined_on_ref_collection = reference_filter_results.count(ref_collection_name) > 0, has_filter_reference = (joined_on_ref_collection && reference_filter_results.at(ref_collection_name).count > 0); auto doc_has_reference = false, joined_coll_has_reference = false; // Reference include_by without join, check if doc itself contains the reference. 
if (!joined_on_ref_collection && collection != nullptr) { doc_has_reference = ref_collection->is_referenced_in(collection->get_name()); } std::string joined_coll_having_reference; // Check if the joined collection has a reference. if (!joined_on_ref_collection && !doc_has_reference) { for (const auto &reference_filter_result: reference_filter_results) { joined_coll_has_reference = ref_collection->is_referenced_in(reference_filter_result.first); if (joined_coll_has_reference) { joined_coll_having_reference = reference_filter_result.first; break; } } } if (!has_filter_reference && !doc_has_reference && !joined_coll_has_reference) { continue; } std::vector<std::string> ref_include_fields_vec, ref_exclude_fields_vec; StringUtils::split(ref_include_exclude.include_fields, ref_include_fields_vec, ","); StringUtils::split(ref_include_exclude.exclude_fields, ref_exclude_fields_vec, ","); spp::sparse_hash_set<std::string> ref_include_fields, ref_exclude_fields; ref_include_fields.insert(ref_include_fields_vec.begin(), ref_include_fields_vec.end()); ref_exclude_fields.insert(ref_exclude_fields_vec.begin(), ref_exclude_fields_vec.end()); tsl::htrie_set<char> ref_include_fields_full, ref_exclude_fields_full; auto include_exclude_op = ref_collection->populate_include_exclude_fields_lk(ref_include_fields, ref_exclude_fields, ref_include_fields_full, ref_exclude_fields_full); auto error_prefix = "Referenced collection `" + ref_collection_name + "`: "; if (!include_exclude_op.ok()) { return Option<bool>(include_exclude_op.code(), error_prefix + include_exclude_op.error()); } Option<bool> prune_doc_op = Option<bool>(true); auto const& ref_collection_alias = ref_include_exclude.alias; if (has_filter_reference) { auto const& ref_filter_result = reference_filter_results.at(ref_collection_name); prune_doc_op = prune_ref_doc(doc, ref_filter_result, ref_include_fields_full, ref_exclude_fields_full, ref_filter_result.is_reference_array_field, ref_include_exclude); } else if 
(doc_has_reference) { auto get_reference_field_op = ref_collection->get_referenced_in_field_with_lock(collection->get_name()); if (!get_reference_field_op.ok()) { continue; } auto const& field_name = get_reference_field_op.get(); auto const& reference_helper_field_name = field_name + fields::REFERENCE_HELPER_FIELD_SUFFIX; if (collection->get_schema().count(reference_helper_field_name) == 0) { continue; } if (collection->get_object_reference_helper_fields().count(field_name) != 0) { std::vector<std::string> keys; StringUtils::split(field_name, keys, "."); auto const& key = keys[0]; if (!doc.contains(key)) { if (!original_doc.contains(key)) { auto const& schema = collection->get_schema(); auto it = schema.find(field_name); if (it == schema.end() || it->optional) { continue; } return Option<bool>(400, "Could not find `" + key + "` key in the document to include the referenced document."); } // The key is excluded from the doc by the query, inserting empty object(s) so referenced doc can be // included in it. if (original_doc[key].is_array()) { doc[key] = nlohmann::json::array(); doc[key].insert(doc[key].begin(), original_doc[key].size(), nlohmann::json::object()); } else { doc[key] = nlohmann::json::object(); } } if (doc[key].is_array()) { for (uint32_t i = 0; i < doc[key].size(); i++) { uint32_t ref_doc_id; auto op = collection->get_object_array_related_id(reference_helper_field_name, seq_id, i, ref_doc_id); if (!op.ok()) { if (op.code() == 404) { // field_name is not indexed. break; } else { // No reference found for this object. 
continue; } } reference_filter_result_t result(1, new uint32_t[1]{ref_doc_id}); prune_doc_op = prune_ref_doc(doc[key][i], result, ref_include_fields_full, ref_exclude_fields_full, false, ref_include_exclude); if (!prune_doc_op.ok()) { return prune_doc_op; } } } else { std::vector<uint32_t> ids; auto get_references_op = collection->get_related_ids(field_name, seq_id, ids); if (!get_references_op.ok()) { LOG(ERROR) << "Error while getting related ids: " + get_references_op.error(); continue; } reference_filter_result_t result(ids.size(), &ids[0]); prune_doc_op = prune_ref_doc(doc[key], result, ref_include_fields_full, ref_exclude_fields_full, collection->get_schema().at(field_name).is_array(), ref_include_exclude); result.docs = nullptr; } } else { std::vector<uint32_t> ids; auto get_references_op = collection->get_related_ids(field_name, seq_id, ids); if (!get_references_op.ok()) { LOG(ERROR) << "Error while getting related ids: " + get_references_op.error(); continue; } reference_filter_result_t result(ids.size(), &ids[0]); prune_doc_op = prune_ref_doc(doc, result, ref_include_fields_full, ref_exclude_fields_full, collection->get_schema().at(field_name).is_array(), ref_include_exclude); result.docs = nullptr; } } else if (joined_coll_has_reference) { auto joined_collection = cm.get_collection(joined_coll_having_reference); if (joined_collection == nullptr) { continue; } auto reference_field_name_op = ref_collection->get_referenced_in_field_with_lock(joined_coll_having_reference); if (!reference_field_name_op.ok() || joined_collection->get_schema().count(reference_field_name_op.get()) == 0) { continue; } auto const& reference_field_name = reference_field_name_op.get(); auto const& reference_filter_result = reference_filter_results.at(joined_coll_having_reference); auto const& count = reference_filter_result.count; std::vector<uint32_t> ids; ids.reserve(count); for (uint32_t i = 0; i < count; i++) { joined_collection->get_related_ids_with_lock(reference_field_name, 
reference_filter_result.docs[i], ids); } if (ids.empty()) { continue; } gfx::timsort(ids.begin(), ids.end()); ids.erase(unique(ids.begin(), ids.end()), ids.end()); reference_filter_result_t result; result.count = ids.size(); result.docs = &ids[0]; prune_doc_op = prune_ref_doc(doc, result, ref_include_fields_full, ref_exclude_fields_full, joined_collection->get_schema().at(reference_field_name).is_array(), ref_include_exclude); result.docs = nullptr; } if (!prune_doc_op.ok()) { return prune_doc_op; } } return Option<bool>(true); } Option<bool> parse_reference_filter_helper(const std::string& filter_query, size_t& index, std::string& ref_coll_name, std::string& join) { auto error = Option<bool>(400, "Could not parse the reference filter: `" + filter_query.substr(index) + "`."); if (index >= filter_query.size() || filter_query[index] != '$') { return error; } auto const start_index = index; auto size = filter_query.size(); auto parenthesis_pos = filter_query.find('(', index + 1); if (parenthesis_pos == std::string::npos) { return error; } index = parenthesis_pos; ref_coll_name = filter_query.substr(start_index + 1, parenthesis_pos - start_index - 1); StringUtils::trim(ref_coll_name); // The reference filter could have parenthesis inside it. 
$Foo((X && Y) || Z) int parenthesis_count = 1; while (++index < size && parenthesis_count > 0) { if (filter_query[index] == '(') { parenthesis_count++; } else if (filter_query[index] == ')') { parenthesis_count--; } } if (parenthesis_count != 0) { return error; } join = filter_query.substr(start_index, index - start_index); return Option<bool>(true); } Option<bool> Join::parse_reference_filter(const std::string& filter_query, std::queue<std::string>& tokens, size_t& index, std::set<std::string>& ref_collection_names) { std::string ref_coll_name, join; auto parse_op = parse_reference_filter_helper(filter_query, index, ref_coll_name, join); if (!parse_op.ok()) { return parse_op; } auto it = ref_collection_names.find(ref_coll_name); if (it == ref_collection_names.end()) { ref_collection_names.insert(ref_coll_name); } else { return Option<bool>(400, "More than one joins found for collection `" + ref_coll_name + "` in the `filter_by`." += " Instead of providing separate join conditions like " "`$customer_product_prices(customer_id:=customer_a) && " "$customer_product_prices(custom_price:<100)`," " the join condition should be provided as a single filter expression like" " `$customer_product_prices(customer_id:=customer_a && custom_price:<100)`"); } tokens.push(join); return Option<bool>(true); } Option<bool> Join::split_reference_include_exclude_fields(const std::string& include_exclude_fields, size_t& index, std::string& token) { auto ref_include_error = Option<bool>(400, "Invalid reference `" + include_exclude_fields + "` in include_fields/" "exclude_fields, expected `$CollectionName(fieldA, ...)`."); auto const& size = include_exclude_fields.size(); size_t start_index = index; while(++index < size && include_exclude_fields[index] != '(') {} if (index >= size) { return ref_include_error; } // In case of nested join, the reference include/exclude field could have parenthesis inside it. 
int parenthesis_count = 1; while (++index < size && parenthesis_count > 0) { if (include_exclude_fields[index] == '(') { parenthesis_count++; } else if (include_exclude_fields[index] == ')') { parenthesis_count--; } } if (parenthesis_count != 0) { return ref_include_error; } // In case of nested reference include, we might end up with one of the following scenarios: // $ref_include( $nested_ref_include(foo, strategy:merge)as nest ) as ref // ...^ // $ref_include( $nested_ref_include(foo, strategy:merge)as nest, bar ) as ref // ...^ auto closing_parenthesis_pos = include_exclude_fields.find(')', index); auto comma_pos = include_exclude_fields.find(',', index); auto alias_start_pos = include_exclude_fields.find(" as ", index); auto alias_end_pos = std::min(closing_parenthesis_pos, comma_pos); std::string alias; if (alias_start_pos != std::string::npos && alias_start_pos < alias_end_pos) { alias = include_exclude_fields.substr(alias_start_pos, alias_end_pos - alias_start_pos); } token = include_exclude_fields.substr(start_index, index - start_index) + " " + StringUtils::trim(alias); StringUtils::trim(token); index = alias_end_pos; return Option<bool>(true); } // Returns tri-state: Error while parsing filter_query (-1), Join not found (0), Join found (1) int8_t skip_index_to_join(const std::string& filter_query, size_t& i) { auto const size = filter_query.size(); while (i < size) { auto c = filter_query[i]; if (c == ' ' || c == '(' || c == ')') { i++; } else if (c == '&' || c == '|') { if (i + 1 >= size || (c == '&' && filter_query[i + 1] != '&') || (c == '|' && filter_query[i + 1] != '|')) { return -1; } i += 2; } else { // Reference filter would start with $ symbol. 
if (c == '$') { return 1; } else { while (i + 1 < size && filter_query[++i] != ':'); if (i >= size) { return -1; } bool in_backtick = false; do { c = filter_query[++i]; if (c == '`') { in_backtick = !in_backtick; } } while (i < size && (in_backtick || (c != '(' && c != ')' && !(c == '&' && filter_query[i + 1] == '&') && !(c == '|' && filter_query[i + 1] == '|')))); } } } return 0; } void Join::get_reference_collection_names(const std::string& filter_query, ref_include_collection_names_t*& ref_include) { if (ref_include == nullptr) { ref_include = new ref_include_collection_names_t(); } auto size = filter_query.size(); for (size_t i = 0; i < size;) { auto const result = skip_index_to_join(filter_query, i); if (result == -1) { ref_include->collection_names.clear(); return; } else if (result == 0) { break; } auto c = filter_query[i]; // Reference filter would start with $ symbol. if (c == '$') { auto open_paren_pos = filter_query.find('(', ++i); if (open_paren_pos == std::string::npos) { ref_include->collection_names.clear(); return; } auto reference_collection_name = filter_query.substr(i, open_paren_pos - i); StringUtils::trim(reference_collection_name); if (!reference_collection_name.empty()) { ref_include->collection_names.insert(reference_collection_name); } i = open_paren_pos; int parenthesis_count = 1; while (++i < size && parenthesis_count > 0) { if (filter_query[i] == '(') { parenthesis_count++; } else if (filter_query[i] == ')') { parenthesis_count--; } } if (parenthesis_count != 0) { ref_include->collection_names.clear(); return; } // Need to process the filter expression inside parenthesis in case of nested join. 
auto sub_filter_query = filter_query.substr(open_paren_pos + 1, i - open_paren_pos - 2); if (sub_filter_query.find('$') != std::string::npos) { get_reference_collection_names(sub_filter_query, ref_include->nested_include); } } } } Option<bool> parse_nested_exclude(const std::string& exclude_field_exp, std::unordered_map<std::string, std::string>& ref_excludes) { // Format: $ref_collection_name(field_1, field_2, $nested_ref_coll(nested_field_1)) size_t index = 0; while (index < exclude_field_exp.size()) { auto parenthesis_index = exclude_field_exp.find('('); auto ref_collection_name = exclude_field_exp.substr(index + 1, parenthesis_index - index - 1); std::string ref_fields; index = parenthesis_index + 1; auto nested_exclude_pos = exclude_field_exp.find('$', parenthesis_index); auto closing_parenthesis_pos = exclude_field_exp.find(')', parenthesis_index); size_t comma_pos; if (nested_exclude_pos < closing_parenthesis_pos) { // Nested reference exclude. // "... $product_variants(title, $inventory(qty)) ..." do { ref_fields += exclude_field_exp.substr(index, nested_exclude_pos - index); StringUtils::trim(ref_fields); index = nested_exclude_pos; std::string nested_exclude_field_exp; auto split_op = Join::split_reference_include_exclude_fields(exclude_field_exp, index, nested_exclude_field_exp); if (!split_op.ok()) { return split_op; } auto parse_op = parse_nested_exclude(nested_exclude_field_exp, ref_excludes); if (!parse_op.ok()) { return parse_op; } nested_exclude_pos = exclude_field_exp.find('$', index); closing_parenthesis_pos = exclude_field_exp.find(')', index); comma_pos = exclude_field_exp.find(',', index); index = std::min(closing_parenthesis_pos, comma_pos) + 1; } while (index < exclude_field_exp.size() && nested_exclude_pos < closing_parenthesis_pos); } // ... $inventory(qty) ... 
if (index < closing_parenthesis_pos) { ref_fields += exclude_field_exp.substr(index, closing_parenthesis_pos - index); } StringUtils::trim(ref_fields); ref_excludes[ref_collection_name] = ref_fields; index = closing_parenthesis_pos + 1; } return Option<bool>(true); } Option<bool> parse_ref_include_parameters(const std::string& include_field_exp, const std::string& parameters, ref_include::strategy_enum& strategy_enum) { std::vector<std::string> parameters_map; StringUtils::split(parameters, parameters_map, ","); for (const auto &item: parameters_map) { std::vector<std::string> parameter_pair; StringUtils::split(item, parameter_pair, ":"); if (parameter_pair.size() != 2) { continue; } auto const& key = StringUtils::trim(parameter_pair[0]); if (key == ref_include::strategy_key) { auto const& include_strategy = StringUtils::trim(parameter_pair[1]); auto string_to_enum_op = ref_include::string_to_enum(include_strategy); if (!string_to_enum_op.ok()) { return Option<bool>(400, "Error parsing `" + include_field_exp + "`: " + string_to_enum_op.error()); } strategy_enum = string_to_enum_op.get(); } else { return Option<bool>(400, "Unknown reference `include_fields` parameter: `" + key + "`."); } } return Option<bool>(true); } Option<bool> parse_nested_include(const std::string& include_field_exp, ref_include_collection_names_t* const ref_include_coll_names, std::vector<ref_include_exclude_fields>& ref_include_exclude_fields_vec) { // Format: $ref_collection_name(field_1, field_2, $nested_ref_coll(nested_field_1, strategy: nested_include_strategy) as nested_ref_alias, strategy: include_strategy) as ref_alias size_t index = 0; while (index < include_field_exp.size()) { auto parenthesis_index = include_field_exp.find('('); auto ref_collection_name = include_field_exp.substr(index + 1, parenthesis_index - index - 1); bool nest_ref_doc = true; std::string ref_fields, ref_alias; index = parenthesis_index + 1; auto nested_include_pos = include_field_exp.find('$', 
parenthesis_index); auto closing_parenthesis_pos = include_field_exp.find(')', parenthesis_index); auto colon_pos = include_field_exp.find(':', index); size_t comma_pos; std::vector<ref_include_exclude_fields> nested_ref_include_exclude_fields_vec; if (nested_include_pos < closing_parenthesis_pos) { // Nested reference include. // "... $product_variants(title, $inventory(qty, strategy:merge) as inventory, strategy :nest) as variants ..." do { ref_fields += include_field_exp.substr(index, nested_include_pos - index); StringUtils::trim(ref_fields); index = nested_include_pos; std::string nested_include_field_exp; auto split_op = Join::split_reference_include_exclude_fields(include_field_exp, index, nested_include_field_exp); if (!split_op.ok()) { return split_op; } auto parse_op = parse_nested_include(nested_include_field_exp, ref_include_coll_names == nullptr ? nullptr : ref_include_coll_names->nested_include, nested_ref_include_exclude_fields_vec); if (!parse_op.ok()) { return parse_op; } nested_include_pos = include_field_exp.find('$', index); closing_parenthesis_pos = include_field_exp.find(')', index); colon_pos = include_field_exp.find(':', index); comma_pos = include_field_exp.find(',', index); index = std::min(std::min(closing_parenthesis_pos, colon_pos), comma_pos) + 1; } while(index < include_field_exp.size() && nested_include_pos < closing_parenthesis_pos); } if (index < closing_parenthesis_pos) { ref_fields += include_field_exp.substr(index, closing_parenthesis_pos - index); } index = closing_parenthesis_pos; // ... 
$inventory(qty, strategy:merge) as inventory auto strategy_enum = ref_include::nest; if (colon_pos < closing_parenthesis_pos) { auto const& parameters_start = ref_fields.rfind(',', colon_pos); std::string parameters; if (parameters_start == std::string::npos) { parameters = ref_fields; ref_fields.clear(); } else { parameters = ref_fields.substr(parameters_start + 1); ref_fields = ref_fields.substr(0, parameters_start); } auto parse_params_op = parse_ref_include_parameters(include_field_exp, parameters, strategy_enum); if (!parse_params_op.ok()) { return parse_params_op; } } StringUtils::trim(ref_fields); auto as_pos = include_field_exp.find(" as ", index); comma_pos = include_field_exp.find(',', index); if (as_pos != std::string::npos && as_pos < comma_pos) { ref_alias = include_field_exp.substr(as_pos + 4, comma_pos - as_pos - 4); } // For an alias `foo`, // In case of "merge" reference doc, we need append `foo.` to all the top level keys of reference doc. // In case of "nest" reference doc, `foo` becomes the key with reference doc as value. nest_ref_doc = strategy_enum == ref_include::nest || strategy_enum == ref_include::nest_array; ref_alias = !ref_alias.empty() ? (StringUtils::trim(ref_alias) + (nest_ref_doc ? "" : ".")) : ""; ref_include_exclude_fields_vec.emplace_back(ref_include_exclude_fields{ref_collection_name, ref_fields, "", ref_alias, strategy_enum}); ref_include_exclude_fields_vec.back().nested_join_includes = std::move(nested_ref_include_exclude_fields_vec); // Referenced collection in filter_by is already mentioned in include_fields. 
if (ref_include_coll_names != nullptr) { ref_include_coll_names->collection_names.erase(ref_collection_name); } if (comma_pos == std::string::npos) { break; } index = comma_pos + 1; } return Option<bool>(true); } Option<bool> Join::initialize_ref_include_exclude_fields_vec(const std::string& filter_query, std::vector<std::string>& include_fields_vec, std::vector<std::string>& exclude_fields_vec, std::vector<ref_include_exclude_fields>& ref_include_exclude_fields_vec) { ref_include_collection_names_t* ref_include_coll_names = nullptr; get_reference_collection_names(filter_query, ref_include_coll_names); std::unique_ptr<ref_include_collection_names_t> guard(ref_include_coll_names); std::vector<std::string> result_include_fields_vec; auto wildcard_include_all = true; for (auto const& include_field_exp: include_fields_vec) { if (include_field_exp[0] != '$') { if (include_field_exp == "*") { continue; } wildcard_include_all = false; result_include_fields_vec.emplace_back(include_field_exp); continue; } // Nested reference include. if (include_field_exp.find('$', 1) != std::string::npos) { auto parse_op = parse_nested_include(include_field_exp, ref_include_coll_names, ref_include_exclude_fields_vec); if (!parse_op.ok()) { return parse_op; } continue; } // Format: $ref_collection_name(field_1, field_2: include_strategy) as ref_alias auto as_pos = include_field_exp.find(" as "); auto ref_include = include_field_exp.substr(0, as_pos); auto alias = (as_pos == std::string::npos) ? 
"" : include_field_exp.substr(as_pos + 4, include_field_exp.size() - (as_pos + 4)); auto parenthesis_index = ref_include.find('('); auto ref_collection_name = ref_include.substr(1, parenthesis_index - 1); auto ref_fields = ref_include.substr(parenthesis_index + 1, ref_include.size() - parenthesis_index - 2); auto strategy_enum = ref_include::nest; auto colon_pos = ref_fields.find(':'); if (colon_pos != std::string::npos) { auto const& parameters_start = ref_fields.rfind(',', colon_pos); std::string parameters; if (parameters_start == std::string::npos) { parameters = ref_fields; ref_fields.clear(); } else { parameters = ref_fields.substr(parameters_start + 1); ref_fields = ref_fields.substr(0, parameters_start); } auto parse_params_op = parse_ref_include_parameters(include_field_exp, parameters, strategy_enum); if (!parse_params_op.ok()) { return parse_params_op; } } // For an alias `foo`, // In case of "merge" reference doc, we need append `foo.` to all the top level keys of reference doc. // In case of "nest" reference doc, `foo` becomes the key with reference doc as value. auto const& nest_ref_doc = strategy_enum == ref_include::nest || strategy_enum == ref_include::nest_array; auto ref_alias = !alias.empty() ? (StringUtils::trim(alias) + (nest_ref_doc ? "" : ".")) : ""; ref_include_exclude_fields_vec.emplace_back(ref_include_exclude_fields{ref_collection_name, ref_fields, "", ref_alias, strategy_enum}); // Referenced collection in filter_by is already mentioned in include_fields. if (ref_include_coll_names != nullptr) { ref_include_coll_names->collection_names.erase(ref_collection_name); } } // Get all the fields of the referenced collection mentioned in the filter_by but not in include_fields. 
auto references = std::ref(ref_include_exclude_fields_vec); while (ref_include_coll_names != nullptr) { for (const auto &reference_collection_name: ref_include_coll_names->collection_names) { references.get().emplace_back(ref_include_exclude_fields{reference_collection_name, "", "", ""}); } ref_include_coll_names = ref_include_coll_names->nested_include; if (references.get().empty()) { break; } references = std::ref(references.get().front().nested_join_includes); } std::unordered_map<std::string, std::string> ref_excludes; std::vector<std::string> result_exclude_fields_vec; for (const auto& exclude_field_exp: exclude_fields_vec) { if (exclude_field_exp[0] != '$') { result_exclude_fields_vec.emplace_back(exclude_field_exp); continue; } // Nested reference exclude. if (exclude_field_exp.find('$', 1) != std::string::npos) { auto parse_op = parse_nested_exclude(exclude_field_exp, ref_excludes); if (!parse_op.ok()) { return parse_op; } continue; } // Format: $ref_collection_name(field_1, field_2) auto parenthesis_index = exclude_field_exp.find('('); auto ref_collection_name = exclude_field_exp.substr(1, parenthesis_index - 1); auto ref_fields = exclude_field_exp.substr(parenthesis_index + 1, exclude_field_exp.size() - parenthesis_index - 2); if (!ref_fields.empty()) { ref_excludes[ref_collection_name] = ref_fields; } } if (!ref_excludes.empty()) { references = std::ref(ref_include_exclude_fields_vec); while (!references.get().empty()) { for (auto& ref_include_exclude: references.get()) { if (ref_excludes.count(ref_include_exclude.collection_name) == 0) { continue; } ref_include_exclude.exclude_fields = ref_excludes[ref_include_exclude.collection_name]; } references = std::ref(references.get().front().nested_join_includes); } } // Since no field of the collection being searched is mentioned in include_fields, include all the fields. 
if (wildcard_include_all) { result_include_fields_vec.clear(); } include_fields_vec = std::move(result_include_fields_vec); exclude_fields_vec = std::move(result_exclude_fields_vec); return Option<bool>(true); } // If joins to the same collection are found in both `embedded_filter` and `query_filter`, remove the join from // `embedded_filter` and merge its join condition with the `query_filter` join in the following manner: // `$JoinCollectionName((<embedded_join_condition>) && <query_join_condition>)` bool Join::merge_join_conditions(string& embedded_filter, string& query_filter) { std::unordered_map<std::string, std::string> coll_name_to_embedded_join; for (size_t i = 0; i < embedded_filter.size();) { auto const result = skip_index_to_join(embedded_filter, i); if (result == -1) { return false; } else if (result == 0) { break; } std::string ref_coll_name, join; if (!parse_reference_filter_helper(embedded_filter, i, ref_coll_name, join).ok()) { return false; } if (coll_name_to_embedded_join.find(ref_coll_name) != coll_name_to_embedded_join.end()) { // Multiple joins to the same collection found. return false; } coll_name_to_embedded_join[ref_coll_name] = join; } if (coll_name_to_embedded_join.empty()) { // No join found in the embedded filter_by. return true; } std::set<std::string> query_join_coll_names; for (size_t i = 0; i < query_filter.size();) { auto const result = skip_index_to_join(query_filter, i); if (result == -1) { return false; } else if (result == 0) { break; } // Merge join conditions auto const& join_start_index = i; auto const q_parenthesis_pos = query_filter.find('(', i + 1); if (q_parenthesis_pos == std::string::npos) { return false; } auto ref_coll_name = query_filter.substr(join_start_index + 1, q_parenthesis_pos - join_start_index - 1); StringUtils::trim(ref_coll_name); if (query_join_coll_names.find(ref_coll_name) != query_join_coll_names.end()) { // Multiple joins to the same collection found. 
return false; } auto it = coll_name_to_embedded_join.find(ref_coll_name); if (it != coll_name_to_embedded_join.end()) { auto const& embedded_join = it->second; auto const e_parenthesis_pos = embedded_join.find('('); if (e_parenthesis_pos == std::string::npos) { return false; } auto const embedded_join_condition = embedded_join.substr(e_parenthesis_pos + 1, embedded_join.size() - e_parenthesis_pos - 2); query_filter.insert(q_parenthesis_pos + 1, ("(" + embedded_join_condition + ") && ")); query_join_coll_names.insert(ref_coll_name); } std::string join; if (!parse_reference_filter_helper(query_filter, i, ref_coll_name, join).ok()) { return false; } } // Erase the embedded joins that were merged into query filter. for (const auto& ref_coll_name: query_join_coll_names) { auto it = coll_name_to_embedded_join.find(ref_coll_name); if (it == coll_name_to_embedded_join.end()) { return false; } auto const& embedded_join = it->second; // In a complex embedded filter expression, there can be following cases: // 1. (Join && ... / (Join || ... // 2. ... && Join) / ... || Join) // 3. ... && (Join) && ... / ... || (Join) || ... auto const& join_start_index = embedded_filter.find(embedded_join); if (join_start_index == std::string::npos) { return false; } // i and j point to start and end index of the join respectively. We will move i to left and j to right to probe // the embedded filter expression to check which case this join falls into. size_t i = join_start_index, j = join_start_index + embedded_join.size() - 1; while (i > 0 && embedded_filter[--i] == ' '); while (j < embedded_filter.size() && embedded_filter[++j] == ' '); if (i == 0 && j >= embedded_filter.size()) { // Embedded filter had only one expression. embedded_filter.clear(); continue; } bool is_join_enclosed = embedded_filter[i] == '(' && embedded_filter[j] == ')'; if (is_join_enclosed) { // ... ( Join ) ... // Still need to move both i and j. 
while (i > 0 && embedded_filter[--i] == ' '); while (j < embedded_filter.size() && embedded_filter[++j] == ' '); if (i == 0 && j >= embedded_filter.size()) { // Embedded join was enclosed within parentheses. embedded_filter.clear(); continue; } else if (embedded_filter[i] == '(' && embedded_filter[j] == ')') { // Join enclosed inside multiple parenthesis. return false; } } // Smallest filter expression will have both field name and value that are only 1 character long like, f:v // So there has to be at least 5 characters after join in a complex filter expression like, &&f:v or before // join like, f:v|| // Case 1. // Either join is the first expression in embedded filter like, Join ... // or it is the first expression in a sub-expression like, ... ( Join ... ) ... if ((i == 0 || embedded_filter[i] == '(') && j + 4 < embedded_filter.size()) { if ((embedded_filter[j] == '&' && embedded_filter[j + 1] == '&') || (embedded_filter[j] == '|' && embedded_filter[j + 1] == '|')) { j++; while (j < embedded_filter.size() && embedded_filter[++j] == ' '); (is_join_enclosed || embedded_filter[i] == '$') ? embedded_filter.erase(0, j) : embedded_filter.erase(i + 1, j - i - 1); } else { return false; } } // Case 2. // Either join is the last expression in embedded filter like, ... Join // or it is the last expression in a sub-expression like, ... ( ... Join ) ... else if ((j >= embedded_filter.size() || embedded_filter[j] == ')') && i > 4) { if ((embedded_filter[i] == '&' && embedded_filter[i - 1] == '&') || (embedded_filter[i] == '|' && embedded_filter[i - 1] == '|')) { i--; while (i > 0 && embedded_filter[--i] == ' '); embedded_filter.erase(i + 1, j - i - 1); } else { return false; } } // Case 3. // Join is in between filter expressions like, ... && Join && ... 
else if (i > 4 && j + 4 < embedded_filter.size()) { if ((embedded_filter[i] == '&' && embedded_filter[i - 1] == '&' && embedded_filter[j] == '&' && embedded_filter[j + 1] == '&') || (embedded_filter[i] == '|' && embedded_filter[i - 1] == '|' && embedded_filter[j] == '|' && embedded_filter[j + 1] == '|')) { j++; embedded_filter.erase(i + 1, j - i); } else { return false; } } else { return false; } } return true; }
68,595
C++
.cpp
1,236
40.251618
183
0.529371
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,784
store.cpp
typesense_typesense/src/store.cpp
#include "include/store.h" Store::Store(const std::string & state_dir_path, const size_t wal_ttl_secs, const size_t wal_size_mb, bool disable_wal, int32_t ttl): state_dir_path(state_dir_path){ // Optimize RocksDB options.IncreaseParallelism(); options.OptimizeLevelStyleCompaction(); // create the DB if it's not already present options.create_if_missing = true; options.write_buffer_size = 4*1048576; options.max_write_buffer_number = 2; options.merge_operator.reset(new UInt64AddOperator); options.compression = rocksdb::CompressionType::kSnappyCompression; options.max_log_file_size = 4*1048576; options.keep_log_file_num = 5; /*options.table_properties_collector_factories.emplace_back( rocksdb::NewCompactOnDeletionCollectorFactory(10000, 7500, 0.5));*/ // these need to be high for replication scenarios options.WAL_ttl_seconds = wal_ttl_secs; options.WAL_size_limit_MB = wal_size_mb; // Disable WAL for master writes (Raft's WAL is used) // The replica uses native WAL, though. write_options.disableWAL = disable_wal; // open DB init_db(ttl); } Store::~Store() { close(); } rocksdb::Status Store::init_db(int32_t ttl) { LOG(INFO) << "Initializing DB by opening state dir: " << state_dir_path; rocksdb::Status s; if(ttl > 0) { rocksdb::DBWithTTL* dbWithTtl; s = rocksdb::DBWithTTL::Open(options, state_dir_path, &dbWithTtl, ttl, false); db = dbWithTtl; } else { s = rocksdb::DB::Open(options, state_dir_path, &db); } if(!s.ok()) { LOG(ERROR) << "Error while initializing store: " << s.ToString(); if(s.code() == rocksdb::Status::Code::kIOError) { LOG(ERROR) << "It seems like the data directory " << state_dir_path << " is already being used by " << "another Typesense server. 
"; LOG(ERROR) << "If you are SURE that this is not the case, delete the LOCK file " << "in the data db directory and try again."; } } assert(s.ok()); return s; } bool Store::insert(const std::string& key, const std::string& value) { std::shared_lock lock(mutex); rocksdb::Status status = db->Put(write_options, key, value); return status.ok(); } bool Store::batch_write(rocksdb::WriteBatch& batch) { std::shared_lock lock(mutex); rocksdb::Status status = db->Write(write_options, &batch); return status.ok(); } bool Store::contains(const std::string& key) const { std::shared_lock lock(mutex); std::string value; bool value_found; bool key_may_exist = db->KeyMayExist(rocksdb::ReadOptions(), key, &value, &value_found); // returns false when key definitely does not exist if(!key_may_exist) { return false; } if(value_found) { return true; } // otherwise, we have try getting the value rocksdb::Status status = db->Get(rocksdb::ReadOptions(), key, &value); return status.ok() && !status.IsNotFound(); } StoreStatus Store::get(const std::string& key, std::string& value) const { std::shared_lock lock(mutex); rocksdb::Status status = db->Get(rocksdb::ReadOptions(), key, &value); if(status.ok()) { return StoreStatus::FOUND; } if(status.IsNotFound()) { return StoreStatus::NOT_FOUND; } LOG(ERROR) << "Error while fetching the key: " << key << " - status is: " << status.ToString(); return StoreStatus::ERROR; } bool Store::remove(const std::string& key) { std::shared_lock lock(mutex); rocksdb::Status status = db->Delete(write_options, key); return status.ok(); } rocksdb::Iterator* Store::scan(const std::string & prefix, const rocksdb::Slice* iterate_upper_bound) { std::shared_lock lock(mutex); rocksdb::ReadOptions read_opts; if(iterate_upper_bound) { read_opts.iterate_upper_bound = iterate_upper_bound; } rocksdb::Iterator *iter = db->NewIterator(read_opts); iter->Seek(prefix); return iter; } rocksdb::Iterator* Store::get_iterator() { std::shared_lock lock(mutex); rocksdb::Iterator* it = 
db->NewIterator(rocksdb::ReadOptions()); return it; } void Store::scan_fill(const std::string& prefix_start, const std::string& prefix_end, std::vector<std::string> & values) { rocksdb::ReadOptions read_opts; rocksdb::Slice upper_bound(prefix_end); read_opts.iterate_upper_bound = &upper_bound; std::shared_lock lock(mutex); rocksdb::Iterator *iter = db->NewIterator(read_opts); for (iter->Seek(prefix_start); iter->Valid() && iter->key().starts_with(prefix_start); iter->Next()) { values.push_back(iter->value().ToString()); } delete iter; } void Store::increment(const std::string & key, uint32_t value) { std::shared_lock lock(mutex); db->Merge(write_options, key, StringUtils::serialize_uint32_t(value)); } uint64_t Store::get_latest_seq_number() const { std::shared_lock lock(mutex); return db->GetLatestSequenceNumber(); } Option<std::vector<std::string>*> Store::get_updates_since(const uint64_t seq_number_org, const uint64_t max_updates) const { std::shared_lock lock(mutex); const uint64_t local_latest_seq_num = db->GetLatestSequenceNumber(); // Since GetUpdatesSince(0) == GetUpdatesSince(1) const uint64_t seq_number = (seq_number_org == 0) ? 1 : seq_number_org; if(seq_number == local_latest_seq_num+1) { // replica has caught up, send an empty list as result std::vector<std::string>* updates = new std::vector<std::string>(); return Option<std::vector<std::string>*>(updates); } std::unique_ptr<rocksdb::TransactionLogIterator> iter; rocksdb::Status status = db->GetUpdatesSince(seq_number, &iter); if(!status.ok()) { LOG(ERROR) << "Error while fetching updates for replication: " << status.ToString(); std::ostringstream error; error << "Unable to fetch updates. " << "Master's latest sequence number is " << local_latest_seq_num << " but requested sequence number is " << seq_number; LOG(ERROR) << error.str(); return Option<std::vector<std::string>*>(400, error.str()); } if(!iter->Valid()) { std::ostringstream error; error << "Invalid iterator. 
Master's latest sequence number is " << local_latest_seq_num << " but " << "updates are requested from sequence number " << seq_number << ". " << "The master's WAL entries might have expired (they are kept only for 24 hours)."; LOG(ERROR) << error.str(); return Option<std::vector<std::string>*>(400, error.str()); } uint64_t num_updates = 0; std::vector<std::string>* updates = new std::vector<std::string>(); bool first_iteration = true; while(iter->Valid() && num_updates < max_updates) { const rocksdb::BatchResult & batch = iter->GetBatch(); if(first_iteration) { first_iteration = false; if(batch.sequence != seq_number) { std::ostringstream error; error << "Invalid iterator. Requested sequence number is " << seq_number << " but " << "updates are available only from sequence number " << batch.sequence << ". " << "The master's WAL entries might have expired (they are kept only for 24 hours)."; LOG(ERROR) << error.str(); return Option<std::vector<std::string>*>(400, error.str()); } } const std::string & write_batch_serialized = batch.writeBatchPtr->Data(); updates->push_back(write_batch_serialized); num_updates += 1; iter->Next(); } return Option<std::vector<std::string>*>(updates); } void Store::close() { std::unique_lock lock(mutex); delete db; db = nullptr; } int Store::reload(bool clear_state_dir, const std::string& snapshot_path, int32_t ttl) { std::unique_lock lock(mutex); // we don't use close() to avoid nested lock and because lock is required until db is re-initialized delete db; db = nullptr; if(clear_state_dir) { if (!delete_path(state_dir_path, true)) { LOG(WARNING) << "rm " << state_dir_path << " failed"; return -1; } LOG(INFO) << "rm " << state_dir_path << " success"; } if(!snapshot_path.empty()) { // tries to use link if possible, or else copies if (!copy_dir(snapshot_path, state_dir_path)) { LOG(WARNING) << "copy snapshot " << snapshot_path << " to " << state_dir_path << " failed"; return -1; } LOG(INFO) << "copy snapshot " << snapshot_path << " to " << 
state_dir_path << " success"; } if (!create_directory(state_dir_path)) { LOG(WARNING) << "CreateDirectory " << state_dir_path << " failed"; return -1; } const rocksdb::Status& status = init_db(ttl); if (!status.ok()) { LOG(WARNING) << "Open DB " << state_dir_path << " failed, msg: " << status.ToString(); return -1; } LOG(INFO) << "DB open success!"; return 0; } void Store::flush() { std::shared_lock lock(mutex); rocksdb::FlushOptions options; db->Flush(options); } rocksdb::Status Store::compact_all() { std::shared_lock lock(mutex); return db->CompactRange(rocksdb::CompactRangeOptions(), nullptr, nullptr); } rocksdb::Status Store::create_check_point(rocksdb::Checkpoint** checkpoint_ptr, const std::string& db_snapshot_path) { std::shared_lock lock(mutex); rocksdb::Status status = rocksdb::Checkpoint::Create(db, checkpoint_ptr); if(!status.ok()) { LOG(ERROR) << "Checkpoint Create failed, msg:" << status.ToString(); return status; } status = (*checkpoint_ptr)->CreateCheckpoint(db_snapshot_path); if(!status.ok()) { LOG(WARNING) << "Checkpoint CreateCheckpoint failed at snapshot path: " << db_snapshot_path << ", msg:" << status.ToString(); } return status; } rocksdb::Status Store::delete_range(const std::string& begin_key, const std::string& end_key) { std::shared_lock lock(mutex); return db->DeleteRange(rocksdb::WriteOptions(), db->DefaultColumnFamily(), begin_key, end_key); } rocksdb::Status Store::compact_range(const rocksdb::Slice& begin_key, const rocksdb::Slice& end_key) { std::shared_lock lock(mutex); return db->CompactRange(rocksdb::CompactRangeOptions(), &begin_key, &end_key); } rocksdb::DB* Store::_get_db_unsafe() const { return db; } const std::string& Store::get_state_dir_path() const { return state_dir_path; } const rocksdb::Options& Store::get_db_options() const { return options; } void Store::print_memory_usage() { std::string index_usage; db->GetProperty("rocksdb.estimate-table-readers-mem", &index_usage); LOG(INFO) << "rocksdb.estimate-table-readers-mem: 
" << index_usage; std::string memtable_usage; db->GetProperty("rocksdb.cur-size-all-mem-tables", &memtable_usage); LOG(INFO) << "rocksdb.cur-size-all-mem-tables: " << memtable_usage; } void Store::get_last_N_values(const std::string& userid_prefix, uint32_t N, std::vector<std::string>& values) { std::shared_lock lock(mutex); rocksdb::Iterator* iter = db->NewIterator(rocksdb::ReadOptions()); auto prefix_key = userid_prefix + "~"; iter->SeekForPrev(prefix_key); while(iter->Valid() && N) { auto key = iter->key().ToString(); if(!StringUtils::begins_with(key, userid_prefix)) { break; } values.push_back(iter->value().ToString()); N--; iter->Prev(); } delete iter; }
11,806
C++
.cpp
282
35.574468
125
0.637094
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,785
aq_model.cpp
typesense_typesense/src/aq_model.cpp
#include "vq_model.h" #include <sstream> #define DR_WAV_IMPLEMENTATION #include "dr_wav.h" whisper_context* WhisperModel::validate_and_load_model(const std::string& model_path) { return whisper_init_from_file(model_path.c_str()); } WhisperModel::WhisperModel(whisper_context* ctx, const std::string& model_name) : ctx(ctx), VQModel(model_name) { // surpress whisper logs whisper_log_set([](enum ggml_log_level level, const char * text, void * user_data) { }, nullptr); if(whisper_is_multilingual(ctx)) { params.language = "auto"; params.detect_language = true; } params.suppress_non_speech_tokens = true; } WhisperModel::~WhisperModel() { whisper_free(ctx); } bool WhisperModel::read_wav(const void* data, size_t size, std::vector<float>& pcmf32) { drwav wav; if(!drwav_init_memory(&wav, data, size, nullptr)) { return false; } if(wav.channels != 1 && wav.channels != 2) { drwav_uninit(&wav); return false; } if(wav.bitsPerSample != 16) { drwav_uninit(&wav); return false; } if(wav.sampleRate != 16000) { drwav_uninit(&wav); return false; } const uint64_t samples = wav.totalPCMFrameCount * wav.channels; std::vector<int16_t> pcmi16(samples); drwav_read_pcm_frames_s16(&wav, wav.totalPCMFrameCount, pcmi16.data()); drwav_uninit(&wav); pcmf32.resize(samples); if(wav.channels == 1) { for (uint64_t i = 0; i < wav.totalPCMFrameCount; i++) { pcmf32[i] = float(pcmi16[i]) / 32768.0f; } } else { for (uint64_t i = 0; i < wav.totalPCMFrameCount; i++) { pcmf32[i] = float(pcmi16[2 * i] + pcmi16[2 * i + 1]) / 65536.0f; } } return true; } Option<std::string> WhisperModel::transcribe(const std::string& audio_base64) { std::vector<float> pcmf32; // Decode audio auto raw_audio = StringUtils::base64_decode(audio_base64); // Read wav auto res = read_wav(raw_audio.data(), raw_audio.size(), pcmf32); if(!res) { return Option<std::string>(400, "Invalid audio format. 
Please provide a 16-bit 16kHz wav file."); } { std::unique_lock<std::mutex> lock(mutex); if(whisper_full_parallel(ctx, params, pcmf32.data(), pcmf32.size(), 1) != 0) { return Option<std::string>(400, "Error while transcribing."); } } std::stringstream ss; for(int i = 0; i < whisper_full_n_segments(ctx); i++) { ss << whisper_full_get_segment_text(ctx, i); } std::string result = ss.str(); return Option<std::string>(StringUtils::trim(result)); }
2,729
C++
.cpp
75
29.706667
116
0.612167
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,786
validator.cpp
typesense_typesense/src/validator.cpp
#include "validator.h" #include "field.h" Option<uint32_t> validator_t::coerce_element(const field& a_field, nlohmann::json& document, nlohmann::json& doc_ele, const std::string& fallback_field_type, const DIRTY_VALUES& dirty_values) { const std::string& field_name = a_field.name; bool array_ele_erased = false; nlohmann::json::iterator dummy_iter; if(a_field.type == field_types::STRING) { if(!doc_ele.is_string()) { Option<uint32_t> coerce_op = coerce_string(dirty_values, fallback_field_type, a_field, document, field_name, dummy_iter, false, array_ele_erased); if(!coerce_op.ok()) { return coerce_op; } } } else if(a_field.type == field_types::INT32) { if(!doc_ele.is_number_integer()) { Option<uint32_t> coerce_op = coerce_int32_t(dirty_values, a_field, document, field_name, dummy_iter, false, array_ele_erased); if(!coerce_op.ok()) { return coerce_op; } } } else if(a_field.type == field_types::INT64) { if(!doc_ele.is_number_integer()) { Option<uint32_t> coerce_op = coerce_int64_t(dirty_values, a_field, document, field_name, dummy_iter, false, array_ele_erased); if(!coerce_op.ok()) { return coerce_op; } } } else if(a_field.type == field_types::FLOAT) { if(!doc_ele.is_number()) { // using `is_number` allows integer to be passed to a float field Option<uint32_t> coerce_op = coerce_float(dirty_values, a_field, document, field_name, dummy_iter, false, array_ele_erased); if(!coerce_op.ok()) { return coerce_op; } } } else if(a_field.type == field_types::BOOL) { if(!doc_ele.is_boolean()) { Option<uint32_t> coerce_op = coerce_bool(dirty_values, a_field, document, field_name, dummy_iter, false, array_ele_erased); if(!coerce_op.ok()) { return coerce_op; } } } else if(a_field.type == field_types::GEOPOINT) { if(!doc_ele.is_array() || doc_ele.size() != 2) { return Option<>(400, "Field `" + field_name + "` must be a 2 element array: [lat, lng]."); } if(!(doc_ele[0].is_number() && doc_ele[1].is_number())) { // one or more elements is not a number, try to coerce Option<uint32_t> 
coerce_op = coerce_geopoint(dirty_values, a_field, document, field_name, doc_ele[0], doc_ele[1], dummy_iter, false, array_ele_erased); if(!coerce_op.ok()) { return coerce_op; } } } else if(a_field.is_array()) { if (doc_ele.is_null()) { doc_ele = nlohmann::json::array(); } if(!doc_ele.is_array()) { bool is_auto_embedding = a_field.type == field_types::FLOAT_ARRAY && a_field.embed.count(fields::from) > 0; if((a_field.optional && (dirty_values == DIRTY_VALUES::DROP || dirty_values == DIRTY_VALUES::COERCE_OR_DROP)) || is_auto_embedding) { document.erase(field_name); return Option<uint32_t>(200); } else { return Option<>(400, "Field `" + field_name + "` must be an array."); } } nlohmann::json::iterator it = doc_ele.begin(); // have to differentiate the geopoint[] type of a nested array object's geopoint[] vs a simple nested field // geopoint[] type of an array of objects field won't be an array of array if(a_field.nested && a_field.type == field_types::GEOPOINT_ARRAY && it != doc_ele.end() && it->is_number()) { if(!doc_ele.empty() && doc_ele.size() % 2 != 0) { return Option<>(400, "Nested field `" + field_name + "` does not contain valid geopoint values."); } const auto& item = doc_ele; for(size_t ai = 0; ai < doc_ele.size(); ai+=2) { if(!(doc_ele[ai].is_number() && doc_ele[ai+1].is_number())) { // one or more elements is not an number, try to coerce Option<uint32_t> coerce_op = coerce_geopoint(dirty_values, a_field, document, field_name, doc_ele[ai], doc_ele[ai+1], it, true, array_ele_erased); if(!coerce_op.ok()) { return coerce_op; } } it++; } return Option<uint32_t>(200); } if(a_field.type == field_types::FLOAT_ARRAY && a_field.num_dim != 0 && a_field.num_dim != doc_ele.size()) { return Option<uint32_t>(400, "Field `" + a_field.name + "` must have " + std::to_string(a_field.num_dim) + " dimensions."); } for(; it != doc_ele.end(); ) { nlohmann::json& item = it.value(); array_ele_erased = false; if (a_field.type == field_types::STRING_ARRAY && !item.is_string()) { 
Option<uint32_t> coerce_op = coerce_string(dirty_values, fallback_field_type, a_field, document, field_name, it, true, array_ele_erased); if (!coerce_op.ok()) { return coerce_op; } } else if (a_field.type == field_types::INT32_ARRAY && !item.is_number_integer()) { Option<uint32_t> coerce_op = coerce_int32_t(dirty_values, a_field, document, field_name, it, true, array_ele_erased); if (!coerce_op.ok()) { return coerce_op; } } else if (a_field.type == field_types::INT64_ARRAY && !item.is_number_integer()) { Option<uint32_t> coerce_op = coerce_int64_t(dirty_values, a_field, document, field_name, it, true, array_ele_erased); if (!coerce_op.ok()) { return coerce_op; } } else if (a_field.type == field_types::FLOAT_ARRAY && !item.is_number()) { // we check for `is_number` to allow whole numbers to be passed into float fields Option<uint32_t> coerce_op = coerce_float(dirty_values, a_field, document, field_name, it, true, array_ele_erased); if (!coerce_op.ok()) { return coerce_op; } } else if (a_field.type == field_types::BOOL_ARRAY && !item.is_boolean()) { Option<uint32_t> coerce_op = coerce_bool(dirty_values, a_field, document, field_name, it, true, array_ele_erased); if (!coerce_op.ok()) { return coerce_op; } } else if (a_field.type == field_types::GEOPOINT_ARRAY) { if(!item.is_array() || item.size() != 2) { return Option<>(400, "Field `" + field_name + "` must contain 2 element arrays: [ [lat, lng],... 
]."); } if(!(item[0].is_number() && item[1].is_number())) { // one or more elements is not a number, try to coerce Option<uint32_t> coerce_op = coerce_geopoint(dirty_values, a_field, document, field_name, item[0], item[1], it, true, array_ele_erased); if(!coerce_op.ok()) { return coerce_op; } } } if(!array_ele_erased) { // if it is erased, the iterator will be reassigned it++; } } } return Option<uint32_t>(200); } Option<uint32_t> validator_t::coerce_string(const DIRTY_VALUES& dirty_values, const std::string& fallback_field_type, const field& a_field, nlohmann::json &document, const std::string &field_name, nlohmann::json::iterator& array_iter, bool is_array, bool& array_ele_erased) { std::string suffix = is_array ? "an array of" : "a"; auto& item = is_array ? array_iter.value() : document[field_name]; if(dirty_values == DIRTY_VALUES::REJECT) { if(a_field.nested && item.is_array()) { return Option<>(400, "Field `" + field_name + "` has an incorrect type. " "Hint: field inside an array of objects must be an array type as well."); } return Option<>(400, "Field `" + field_name + "` must be " + suffix + " string."); } if(dirty_values == DIRTY_VALUES::DROP) { if(!a_field.optional) { if(a_field.nested && item.is_array()) { return Option<>(400, "Field `" + field_name + "` has an incorrect type. " "Hint: field inside an array of objects must be an array type as well."); } return Option<>(400, "Field `" + field_name + "` must be " + suffix + " string."); } if(!is_array) { document.erase(field_name); } else { array_iter = document[field_name].erase(array_iter); array_ele_erased = true; } return Option<uint32_t>(200); } // we will try to coerce the value to a string if (item.is_number_integer()) { item = std::to_string((int64_t)item); } else if(item.is_number_float()) { item = StringUtils::float_to_str((float)item); } else if(item.is_boolean()) { item = item == true ? 
"true" : "false"; } else { if(dirty_values == DIRTY_VALUES::COERCE_OR_DROP) { if(!a_field.optional) { if(a_field.nested && item.is_array()) { return Option<>(400, "Field `" + field_name + "` has an incorrect type. " "Hint: field inside an array of objects must be an array type as well."); } return Option<>(400, "Field `" + field_name + "` must be " + suffix + " string."); } if(!is_array) { document.erase(field_name); } else { array_iter = document[field_name].erase(array_iter); array_ele_erased = true; } } else { // COERCE_OR_REJECT / non-optional + DROP if(a_field.nested && item.is_array()) { return Option<>(400, "Field `" + field_name + "` has an incorrect type. " "Hint: field inside an array of objects must be an array type as well."); } return Option<>(400, "Field `" + field_name + "` must be " + suffix + " string."); } } return Option<>(200); } Option<uint32_t> validator_t::coerce_int32_t(const DIRTY_VALUES& dirty_values, const field& a_field, nlohmann::json &document, const std::string &field_name, nlohmann::json::iterator& array_iter, bool is_array, bool& array_ele_erased) { std::string suffix = is_array ? "an array of" : "an"; auto& item = is_array ? array_iter.value() : document[field_name]; if(dirty_values == DIRTY_VALUES::REJECT) { if(a_field.nested && item.is_array()) { return Option<>(400, "Field `" + field_name + "` has an incorrect type. " "Hint: field inside an array of objects must be an array type as well."); } return Option<>(400, "Field `" + field_name + "` must be " + suffix + " int32."); } if(dirty_values == DIRTY_VALUES::DROP) { if(!a_field.optional) { if(a_field.nested && item.is_array()) { return Option<>(400, "Field `" + field_name + "` has an incorrect type. 
" "Hint: field inside an array of objects must be an array type as well."); } return Option<>(400, "Field `" + field_name + "` must be " + suffix + " int32."); } if(!is_array) { document.erase(field_name); } else { array_iter = document[field_name].erase(array_iter); array_ele_erased = true; } return Option<uint32_t>(200); } // try to value coerce into an integer if(item.is_number_float()) { item = static_cast<int32_t>(item.get<float>()); } else if(item.is_boolean()) { item = item == true ? 1 : 0; } else if(item.is_string() && StringUtils::is_int32_t(item)) { item = std::atol(item.get<std::string>().c_str()); } else { if(dirty_values == DIRTY_VALUES::COERCE_OR_DROP) { if(!a_field.optional) { if(a_field.nested && item.is_array()) { return Option<>(400, "Field `" + field_name + "` has an incorrect type. " "Hint: field inside an array of objects must be an array type as well."); } return Option<>(400, "Field `" + field_name + "` must be " + suffix + " int32."); } if(!is_array) { document.erase(field_name); } else { array_iter = document[field_name].erase(array_iter); array_ele_erased = true; } } else { // COERCE_OR_REJECT / non-optional + DROP if(a_field.nested && item.is_array()) { return Option<>(400, "Field `" + field_name + "` has an incorrect type. 
" "Hint: field inside an array of objects must be an array type as well."); } return Option<>(400, "Field `" + field_name + "` must be " + suffix + " int32."); } } if(document.contains(field_name) && document[field_name].get<int64_t>() > INT32_MAX) { if(a_field.optional && (dirty_values == DIRTY_VALUES::DROP || dirty_values == DIRTY_VALUES::COERCE_OR_REJECT)) { document.erase(field_name); } else { return Option<>(400, "Field `" + field_name + "` exceeds maximum value of int32."); } } return Option<uint32_t>(200); } Option<uint32_t> validator_t::coerce_int64_t(const DIRTY_VALUES& dirty_values, const field& a_field, nlohmann::json &document, const std::string &field_name, nlohmann::json::iterator& array_iter, bool is_array, bool& array_ele_erased) { std::string suffix = is_array ? "an array of" : "an"; auto& item = is_array ? array_iter.value() : document[field_name]; // Object array reference helper field. It's not provided by the user. if(is_array && a_field.nested && a_field.is_reference_helper) { // It's an array of two uint32_t values indicating the object index and referenced doc id respectively. if(item.size() != 2 || !item.at(0).is_number_unsigned() || !item.at(1).is_number_unsigned()) { return Option<>(400, "`" + field_name + "` object array reference helper field has wrong value `" + item.dump() + "`."); } return Option<uint32_t>(200); } if(dirty_values == DIRTY_VALUES::REJECT) { if(a_field.nested && item.is_array()) { return Option<>(400, "Field `" + field_name + "` has an incorrect type. " "Hint: field inside an array of objects must be an array type as well."); } return Option<>(400, "Field `" + field_name + "` must be " + suffix + " int64."); } if(dirty_values == DIRTY_VALUES::DROP) { if(!a_field.optional) { if(a_field.nested && item.is_array()) { return Option<>(400, "Field `" + field_name + "` has an incorrect type. 
" "Hint: field inside an array of objects must be an array type as well."); } return Option<>(400, "Field `" + field_name + "` must be " + suffix + " int64."); } if(!is_array) { document.erase(field_name); } else { array_iter = document[field_name].erase(array_iter); array_ele_erased = true; } return Option<uint32_t>(200); } // try to value coerce into an integer if(item.is_number_float()) { item = static_cast<int64_t>(item.get<float>()); } else if(item.is_boolean()) { item = item == true ? 1 : 0; } else if(item.is_string() && StringUtils::is_int64_t(item)) { item = std::atoll(item.get<std::string>().c_str()); } else { if(dirty_values == DIRTY_VALUES::COERCE_OR_DROP) { if(!a_field.optional) { if(a_field.nested && item.is_array()) { return Option<>(400, "Field `" + field_name + "` has an incorrect type. " "Hint: field inside an array of objects must be an array type as well."); } return Option<>(400, "Field `" + field_name + "` must be " + suffix + " int64."); } if(!is_array) { document.erase(field_name); } else { array_iter = document[field_name].erase(array_iter); array_ele_erased = true; } } else { // COERCE_OR_REJECT / non-optional + DROP if(a_field.nested && item.is_array()) { return Option<>(400, "Field `" + field_name + "` has an incorrect type. " "Hint: field inside an array of objects must be an array type as well."); } return Option<>(400, "Field `" + field_name + "` must be " + suffix + " int64."); } } return Option<uint32_t>(200); } Option<uint32_t> validator_t::coerce_bool(const DIRTY_VALUES& dirty_values, const field& a_field, nlohmann::json &document, const std::string &field_name, nlohmann::json::iterator& array_iter, bool is_array, bool& array_ele_erased) { std::string suffix = is_array ? "a array of" : "a"; auto& item = is_array ? array_iter.value() : document[field_name]; if(dirty_values == DIRTY_VALUES::REJECT) { if(a_field.nested && item.is_array()) { return Option<>(400, "Field `" + field_name + "` has an incorrect type. 
" "Hint: field inside an array of objects must be an array type as well."); } return Option<>(400, "Field `" + field_name + "` must be " + suffix + " bool."); } if(dirty_values == DIRTY_VALUES::DROP) { if(!a_field.optional) { if(a_field.nested && item.is_array()) { return Option<>(400, "Field `" + field_name + "` has an incorrect type. " "Hint: field inside an array of objects must be an array type as well."); } return Option<>(400, "Field `" + field_name + "` must be " + suffix + " bool."); } if(!is_array) { document.erase(field_name); } else { array_iter = document[field_name].erase(array_iter); array_ele_erased = true; } return Option<uint32_t>(200); } // try to value coerce into a bool if (item.is_number_integer() && (item.get<int64_t>() == 1 || item.get<int64_t>() == 0)) { item = item.get<int64_t>() == 1; } else if(item.is_string()) { std::string str_val = item.get<std::string>(); StringUtils::tolowercase(str_val); if(str_val == "true") { item = true; return Option<uint32_t>(200); } else if(str_val == "false") { item = false; return Option<uint32_t>(200); } else { return Option<>(400, "Field `" + field_name + "` must be " + suffix + " bool."); } } else { if(dirty_values == DIRTY_VALUES::COERCE_OR_DROP) { if(!a_field.optional) { if(a_field.nested && item.is_array()) { return Option<>(400, "Field `" + field_name + "` has an incorrect type. " "Hint: field inside an array of objects must be an array type as well."); } return Option<>(400, "Field `" + field_name + "` must be " + suffix + " bool."); } if(!is_array) { document.erase(field_name); } else { array_iter = document[field_name].erase(array_iter); array_ele_erased = true; } } else { // COERCE_OR_REJECT / non-optional + DROP if(a_field.nested && item.is_array()) { return Option<>(400, "Field `" + field_name + "` has an incorrect type. 
" "Hint: field inside an array of objects must be an array type as well."); } return Option<>(400, "Field `" + field_name + "` must be " + suffix + " bool."); } } return Option<uint32_t>(200); } Option<uint32_t> validator_t::coerce_geopoint(const DIRTY_VALUES& dirty_values, const field& a_field, nlohmann::json &document, const std::string &field_name, nlohmann::json& lat, nlohmann::json& lng, nlohmann::json::iterator& array_iter, bool is_array, bool& array_ele_erased) { std::string suffix = is_array ? "an array of" : "a"; if(dirty_values == DIRTY_VALUES::REJECT) { return Option<>(400, "Field `" + field_name + "` must be " + suffix + " geopoint."); } if(dirty_values == DIRTY_VALUES::DROP) { if(!a_field.optional) { return Option<>(400, "Field `" + field_name + "` must be " + suffix + " geopoint."); } if(!is_array) { document.erase(field_name); } else { array_iter = document[field_name].erase(array_iter); array_ele_erased = true; } return Option<uint32_t>(200); } // try to value coerce into a geopoint if(!lat.is_number() && lat.is_string()) { if(StringUtils::is_float(lat)) { lat = std::stof(lat.get<std::string>()); } } if(!lng.is_number() && lng.is_string()) { if(StringUtils::is_float(lng)) { lng = std::stof(lng.get<std::string>()); } } if(!lat.is_number() || !lng.is_number()) { if(dirty_values == DIRTY_VALUES::COERCE_OR_DROP) { if(!a_field.optional) { return Option<>(400, "Field `" + field_name + "` must be " + suffix + " geopoint."); } if(!is_array) { document.erase(field_name); } else { array_iter = document[field_name].erase(array_iter); array_ele_erased = true; } } else { // COERCE_OR_REJECT / non-optional + DROP return Option<>(400, "Field `" + field_name + "` must be " + suffix + " geopoint."); } } return Option<uint32_t>(200); } Option<uint32_t> validator_t::coerce_float(const DIRTY_VALUES& dirty_values, const field& a_field, nlohmann::json &document, const std::string &field_name, nlohmann::json::iterator& array_iter, bool is_array, bool& array_ele_erased) { 
std::string suffix = is_array ? "a array of" : "a"; auto& item = is_array ? array_iter.value() : document[field_name]; if(dirty_values == DIRTY_VALUES::REJECT) { if(a_field.nested && item.is_array()) { return Option<>(400, "Field `" + field_name + "` has an incorrect type. " "Hint: field inside an array of objects must be an array type as well."); } return Option<>(400, "Field `" + field_name + "` must be " + suffix + " float."); } if(dirty_values == DIRTY_VALUES::DROP) { if(!a_field.optional) { if(a_field.nested && item.is_array()) { return Option<>(400, "Field `" + field_name + "` has an incorrect type. " "Hint: field inside an array of objects must be an array type as well."); } return Option<>(400, "Field `" + field_name + "` must be " + suffix + " float."); } if(!is_array) { document.erase(field_name); } else { array_iter = document[field_name].erase(array_iter); array_ele_erased = true; } return Option<uint32_t>(200); } // try to value coerce into a float if(item.is_string() && StringUtils::is_float(item)) { item = std::atof(item.get<std::string>().c_str()); } else if(item.is_boolean()) { item = item == true ? 1.0 : 0.0; } else { if(dirty_values == DIRTY_VALUES::COERCE_OR_DROP) { if(!a_field.optional) { if(a_field.nested && item.is_array()) { return Option<>(400, "Field `" + field_name + "` has an incorrect type. " "Hint: field inside an array of objects must be an array type as well."); } return Option<>(400, "Field `" + field_name + "` must be " + suffix + " float."); } if(!is_array) { document.erase(field_name); } else { array_iter = document[field_name].erase(array_iter); array_ele_erased = true; } } else { // COERCE_OR_REJECT / non-optional + DROP if(a_field.nested && item.is_array()) { return Option<>(400, "Field `" + field_name + "` has an incorrect type. 
" "Hint: field inside an array of objects must be an array type as well."); } return Option<>(400, "Field `" + field_name + "` must be " + suffix + " float."); } } return Option<uint32_t>(200); } Option<uint32_t> validator_t::validate_index_in_memory(nlohmann::json& document, uint32_t seq_id, const std::string & default_sorting_field, const tsl::htrie_map<char, field> & search_schema, const tsl::htrie_map<char, field> & embedding_fields, const index_operation_t op, const bool is_update, const std::string& fallback_field_type, const DIRTY_VALUES& dirty_values, const bool validate_embedding_fields) { bool missing_default_sort_field = (!default_sorting_field.empty() && document.count(default_sorting_field) == 0); if((op == CREATE || op == UPSERT) && missing_default_sort_field) { return Option<>(400, "Field `" + default_sorting_field + "` has been declared as a default sorting field, " "but is not found in the document."); } for(const auto& a_field: search_schema) { const std::string& field_name = a_field.name; // ignore embedding fields, they will be validated later if(embedding_fields.count(field_name) > 0) { continue; } if(field_name == "id" || a_field.is_object()) { continue; } if((a_field.optional || op == UPDATE || (op == EMPLACE && is_update)) && document.count(field_name) == 0) { continue; } bool is_auto_embedding = a_field.type == field_types::FLOAT_ARRAY && a_field.embed.count(fields::from) > 0; if(document.count(field_name) == 0 && !is_auto_embedding && a_field.store) { return Option<>(400, "Field `" + field_name + "` has been declared in the schema, " "but is not found in the document."); } nlohmann::json& doc_ele = document[field_name]; if(a_field.optional && doc_ele.is_null()) { // we will ignore `null` on an option field if(!is_update) { // for updates, the erasure is done later since we need to keep the key for overwrite document.erase(field_name); } continue; } auto coerce_op = coerce_element(a_field, document, doc_ele, fallback_field_type, 
dirty_values); if(!coerce_op.ok()) { return coerce_op; } } if(validate_embedding_fields) { // validate embedding fields auto validate_embed_op = validate_embed_fields(document, embedding_fields, search_schema, is_update); if(!validate_embed_op.ok()) { return Option<>(validate_embed_op.code(), validate_embed_op.error()); } } return Option<>(200); } Option<bool> validator_t::validate_embed_fields(const nlohmann::json& document, const tsl::htrie_map<char, field>& embedding_fields, const tsl::htrie_map<char, field> & search_schema, const bool& is_update) { for(const auto& field : embedding_fields) { if(document.contains(field.name) && !is_update) { const auto& field_vec = document[field.name]; if(!field_vec.is_array() || field_vec.empty() || !field_vec[0].is_number() || field_vec.size() != field.num_dim) { return Option<bool>(400, "Field `" + field.name + "` contains an invalid embedding."); } auto it = field_vec.begin(); while(it != field_vec.end()) { if(!it.value().is_number()) { return Option<bool>(400, "Field `" + field.name + "` contains invalid float values."); } it++; } continue; } const auto& embed_from = field.embed[fields::from].get<std::vector<std::string>>(); // flag to check if all fields to embed from are optional and null bool all_optional_and_null = true; for(const auto& field_name : embed_from) { auto schema_field_it = search_schema.find(field_name); auto doc_field_it = document.find(field_name); if(schema_field_it == search_schema.end()) { return Option<bool>(400, "Field `" + field.name + "` has invalid fields to create embeddings from."); } if(doc_field_it == document.end() || doc_field_it.value().is_null()) { if(!is_update && !schema_field_it->optional) { return Option<bool>(400, "Field `" + field_name + "` is needed to create embedding."); } else { continue; } } if(doc_field_it.value().is_null()) { continue; } all_optional_and_null = false; if((schema_field_it.value().type == field_types::STRING && !doc_field_it.value().is_string()) || 
(schema_field_it.value().type == field_types::STRING_ARRAY && !doc_field_it.value().is_array())) { return Option<bool>(400, "Field `" + field_name + "` has malformed data."); } if(doc_field_it.value().is_array()) { for(const auto& val : doc_field_it.value()) { if(!val.is_string()) { return Option<bool>(400, "Field `" + field_name + "` has malformed data."); } } } } if(all_optional_and_null && !field.optional && !is_update) { return Option<bool>(400, "No valid fields found to create embedding for `" + field.name + "`, please provide at least one valid field or make the embedding field optional."); } } return Option<bool>(true); }
33,017
C++
.cpp
653
36.346095
186
0.503922
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,787
posting_list.cpp
typesense_typesense/src/posting_list.cpp
#include "posting_list.h" #include <bitset> #include "for.h" #include "array_utils.h" #include "filter_result_iterator.h" /* block_t operations */ uint32_t posting_list_t::block_t::upsert(const uint32_t id, const std::vector<uint32_t>& positions) { if(id > ids.last() || ids.getLength() == 0) { // append to the end ids.append(id); uint32_t curr_index = offsets.getLength(); offset_index.append(curr_index); for(uint32_t position : positions) { offsets.append(position); } } else { // we have to check if `id` already exists, and do in-place update/insert uint32_t id_index = ids.indexOf(id); if(id_index == ids.getLength()) { // id not found, we have to insert it size_t inserted_index = ids.append(id); uint32_t existing_offset_index = offset_index.at(inserted_index); insert_and_shift_offset_index(inserted_index, positions.size()); offsets.insert(existing_offset_index, &positions[0], positions.size()); } else { // id is already present, so we will only update offset index and offsets uint32_t start_offset_index = offset_index.at(id_index); uint32_t end_offset_index = (id == ids.last()) ? 
offsets.getLength()-1 : offset_index.at(id_index + 1)-1; uint32_t num_offsets = (end_offset_index - start_offset_index) + 1; uint32_t* curr_offsets = offsets.uncompress(); uint32_t m = offsets.getMin(), M = offsets.getMax(); if(num_offsets == positions.size()) { // no need to touch the offset index and need to just do inplace updates of offsets bool find_new_min_max = false; for(size_t i = 0; i < positions.size(); i++) { if((curr_offsets[start_offset_index + i] == m || curr_offsets[start_offset_index + i] == M) && curr_offsets[start_offset_index + i] != positions[i]) { // when an existing min/max is affected we will have to find the new min/max find_new_min_max = true; } if(positions[i] < m) { m = positions[i]; } if(positions[i] > M) { M = positions[i]; } curr_offsets[start_offset_index + i] = positions[i]; } if(find_new_min_max) { for(size_t i = 0; i < offsets.getLength(); i++) { if(curr_offsets[i] < m) { m = curr_offsets[i]; } if(curr_offsets[i] > M) { M = curr_offsets[i]; } } } offsets.load(curr_offsets, offsets.getLength(), m, M); } else { // need to resize offsets array int64_t size_diff = int64_t(positions.size()) - num_offsets; // size_diff can be negative size_t new_offsets_length = offsets.getLength() + size_diff; uint32_t* new_offsets = new uint32_t[new_offsets_length]; std::memmove(new_offsets, curr_offsets, sizeof(uint32_t) * start_offset_index); bool find_new_min_max = false; for(size_t i = 0; i < num_offsets; i++) { if(curr_offsets[start_offset_index + i] == m || curr_offsets[start_offset_index + i] == M) { // when an existing min/max is affected we will have to find the new min/max find_new_min_max = true; } } for(size_t i = 0; i < positions.size(); i++) { if(positions[i] < m) { m = positions[i]; } if(positions[i] > M) { M = positions[i]; } new_offsets[start_offset_index + i] = positions[i]; } std::memmove(new_offsets + start_offset_index + positions.size(), curr_offsets + end_offset_index + 1, sizeof(uint32_t) * (offsets.getLength() - 
(end_offset_index + 1))); if(find_new_min_max) { for(size_t i = 0; i < offsets.getLength(); i++) { if(curr_offsets[i] < m) { m = curr_offsets[i]; } if(curr_offsets[i] > M) { M = curr_offsets[i]; } } } offsets.load(new_offsets, new_offsets_length, m, M); delete [] new_offsets; // shift offset index uint32_t* current_offset_index = offset_index.uncompress(); for(size_t i = id_index+1; i < ids.getLength(); i++) { current_offset_index[i] += size_diff; } offset_index.load(current_offset_index, offset_index.getLength()); delete [] current_offset_index; } delete [] curr_offsets; return 0; } } return 1; } uint32_t posting_list_t::block_t::erase(const uint32_t id) { uint32_t doc_index = ids.indexOf(id); if (doc_index == ids.getLength()) { return 0; } uint32_t start_offset = offset_index.at(doc_index); uint32_t end_offset = (doc_index == ids.getLength() - 1) ? offsets.getLength() : offset_index.at(doc_index + 1); uint32_t doc_indices[1] = {doc_index}; remove_and_shift_offset_index(doc_indices, 1); offsets.remove_index(start_offset, end_offset); ids.remove_value(id); return 1; } void posting_list_t::block_t::remove_and_shift_offset_index(const uint32_t* indices_sorted, const uint32_t num_indices) { uint32_t *curr_array = offset_index.uncompress(); uint32_t *new_array = new uint32_t[offset_index.getLength()]; new_array[0] = 0; uint32_t new_index = 0; uint32_t curr_index = 0; uint32_t indices_counter = 0; uint32_t shift_value = 0; while(curr_index < offset_index.getLength()) { if(indices_counter < num_indices && curr_index >= indices_sorted[indices_counter]) { // skip copying if(curr_index == indices_sorted[indices_counter]) { curr_index++; const uint32_t diff = curr_index == offset_index.getLength() ? 
0 : (offset_index.at(curr_index) - offset_index.at(curr_index-1)); shift_value += diff; } indices_counter++; } else { new_array[new_index++] = curr_array[curr_index++] - shift_value; } } offset_index.load(new_array, new_index); delete[] curr_array; delete[] new_array; } void posting_list_t::block_t::insert_and_shift_offset_index(const uint32_t index, const uint32_t num_offsets) { uint32_t existing_offset_index = offset_index.at(index); uint32_t length = offset_index.getLength(); uint32_t new_length = length + 1; uint32_t *curr_array = offset_index.uncompress(new_length); memmove(&curr_array[index+1], &curr_array[index], sizeof(uint32_t)*(length - index)); curr_array[index] = existing_offset_index; uint32_t curr_index = index + 1; while(curr_index < new_length) { curr_array[curr_index] += num_offsets; curr_index++; } offset_index.load(curr_array, new_length); delete [] curr_array; } bool posting_list_t::block_t::contains(uint32_t id) { return ids.contains(id); } /* posting_list_t operations */ posting_list_t::posting_list_t(uint16_t max_block_elements): BLOCK_MAX_ELEMENTS(max_block_elements) { if(max_block_elements <= 1) { throw std::invalid_argument("max_block_elements must be > 1"); } } posting_list_t::~posting_list_t() { block_t* block = root_block.next; while(block != nullptr) { block_t* next_block = block->next; delete block; block = next_block; } } void posting_list_t::merge_adjacent_blocks(posting_list_t::block_t* block1, posting_list_t::block_t* block2, size_t num_block2_ids_to_move) { // merge ids uint32_t* ids1 = block1->ids.uncompress(); uint32_t* ids2 = block2->ids.uncompress(); size_t block1_orig_size = block1->size(); size_t block2_orig_size = block2->size(); size_t block1_orig_offset_size = block1->offsets.getLength(); size_t block2_orig_offset_size = block2->offsets.getLength(); size_t block1_orig_offset_index_size = block1->offset_index.getLength(); size_t block2_orig_offset_index_size = block2->offset_index.getLength(); uint32_t* new_ids = new 
uint32_t[block1->size() + num_block2_ids_to_move]; std::memmove(new_ids, ids1, sizeof(uint32_t) * block1->size()); std::memmove(new_ids + block1->size(), ids2, sizeof(uint32_t) * num_block2_ids_to_move); block1->ids.load(new_ids, block1->size() + num_block2_ids_to_move); if(block2->size() != num_block2_ids_to_move) { block2->ids.load(ids2 + num_block2_ids_to_move, block2->size() - num_block2_ids_to_move); } else { block2->ids.load(nullptr, 0); } delete [] ids1; delete [] ids2; delete [] new_ids; // merge offset indices uint32_t* offset_index1 = block1->offset_index.uncompress(); uint32_t* offset_index2 = block2->offset_index.uncompress(); uint32_t* new_offset_index = new uint32_t[block1_orig_size + block2_orig_size]; size_t num_block2_offsets_to_move = (num_block2_ids_to_move == block2_orig_size) ? block2->offsets.getLength() : offset_index2[num_block2_ids_to_move]; std::memmove(new_offset_index, offset_index1, sizeof(uint32_t) * block1->offset_index.getLength()); size_t start_index = block1->offset_index.getLength(); size_t base_offset_len = block1->offsets.getLength(); for(size_t i = 0; i < num_block2_ids_to_move; i++) { new_offset_index[start_index + i] = offset_index2[i] + base_offset_len; } block1->offset_index.load(new_offset_index, block1->offset_index.getLength() + num_block2_ids_to_move); if(block2->offset_index.getLength() != num_block2_ids_to_move) { const uint32_t offset_index2_base_index = offset_index2[num_block2_ids_to_move]; for(size_t i = 0; i < (block2_orig_size - num_block2_ids_to_move); i++) { offset_index2[num_block2_ids_to_move + i] -= offset_index2_base_index; } block2->offset_index.load(offset_index2 + num_block2_ids_to_move, block2_orig_size - num_block2_ids_to_move); } else { block2->offset_index.load(nullptr, 0); } // merge offsets uint32_t* offsets1 = block1->offsets.uncompress(); uint32_t* offsets2 = block2->offsets.uncompress(); // we will have to compute new min and max for new block1 and block2 offsets size_t new_block1_offsets_size 
= block1->offsets.getLength() + num_block2_offsets_to_move; uint32_t* new_block1_offsets = new uint32_t[new_block1_offsets_size]; uint32_t min = block1->offsets.getLength() != 0 ? offsets1[0] : 0; uint32_t max = min; // we have to manually copy over so we can find the new min and max for(size_t i = 0; i < block1->offsets.getLength(); i++) { new_block1_offsets[i] = offsets1[i]; if(new_block1_offsets[i] < min) { min = new_block1_offsets[i]; } if(new_block1_offsets[i] > max) { max = new_block1_offsets[i]; } } size_t block2_base_index = block1->offsets.getLength(); for(size_t i = 0; i < num_block2_offsets_to_move; i++) { size_t j = block2_base_index + i; new_block1_offsets[j] = offsets2[i]; if(new_block1_offsets[j] < min) { min = new_block1_offsets[j]; } if(new_block1_offsets[j] > max) { max = new_block1_offsets[j]; } } block1->offsets.load(new_block1_offsets, new_block1_offsets_size, min, max); // reset block2 offsets with remaining elements if(block2->offsets.getLength() != num_block2_offsets_to_move) { const size_t block2_new_offsets_length = (block2->offsets.getLength() - num_block2_offsets_to_move); uint32_t* block2_new_raw_offsets = new uint32_t[block2_new_offsets_length]; min = max = offsets2[num_block2_offsets_to_move]; for(size_t i = 0; i < block2_new_offsets_length; i++) { block2_new_raw_offsets[i] = offsets2[num_block2_offsets_to_move + i]; if(block2_new_raw_offsets[i] < min) { min = block2_new_raw_offsets[i]; } if(block2_new_raw_offsets[i] > max) { max = block2_new_raw_offsets[i]; } } block2->offsets.load(block2_new_raw_offsets, block2_new_offsets_length, min, max); delete [] block2_new_raw_offsets; } else { block2->offsets.load(nullptr, 0, 0, 0); } if(block1->offsets.getLength() < block1->offset_index.getLength()) { LOG(ERROR) << "Block offset length is smaller than offset index length after merging."; } delete [] offset_index1; delete [] offset_index2; delete [] new_offset_index; delete [] offsets1; delete [] offsets2; delete [] new_block1_offsets; } 
/*void print_vec(const std::vector<uint32_t>& vec) { LOG(INFO) << "---"; for(auto x: vec) { LOG(INFO) << x; } LOG(INFO) << "---"; }*/ void posting_list_t::split_block(posting_list_t::block_t* src_block, posting_list_t::block_t* dst_block) { if(src_block->size() <= 1) { return; } uint32_t* raw_ids = src_block->ids.uncompress(); size_t ids_first_half_length = (src_block->size() / 2); size_t ids_second_half_length = (src_block->size() - ids_first_half_length); src_block->ids.load(raw_ids, ids_first_half_length); dst_block->ids.load(raw_ids + ids_first_half_length, ids_second_half_length); uint32_t* raw_offset_indices = src_block->offset_index.uncompress(); size_t offset_indices_first_half_length = (src_block->offset_index.getLength() / 2); size_t offset_indices_second_half_length = (src_block->offset_index.getLength() - offset_indices_first_half_length); src_block->offset_index.load(raw_offset_indices, offset_indices_first_half_length); // update second half to use zero based index uint32_t base_index_diff = raw_offset_indices[offset_indices_first_half_length]; for(size_t i = 0; i < offset_indices_second_half_length; i++) { raw_offset_indices[offset_indices_first_half_length + i] -= base_index_diff; } dst_block->offset_index.load(raw_offset_indices + offset_indices_first_half_length, offset_indices_second_half_length); uint32_t* raw_offsets = src_block->offsets.uncompress(); size_t src_offsets_length = src_block->offsets.getLength(); // load first half of offsets size_t offset_first_half_length = base_index_diff; // we need to find new min and max uint32_t min = raw_offsets[0], max = raw_offsets[0]; for(size_t i = 0; i < offset_first_half_length; i++) { if(raw_offsets[i] < min) { min = raw_offsets[i]; } if(raw_offsets[i] > max) { max = raw_offsets[i]; } } src_block->offsets.load(raw_offsets, offset_first_half_length, min, max); // load second half min = max = raw_offsets[offset_first_half_length]; for(size_t i = offset_first_half_length; i < src_offsets_length; i++) { 
if(raw_offsets[i] < min) { min = raw_offsets[i]; } if(raw_offsets[i] > max) { max = raw_offsets[i]; } } size_t offsets_second_half_length = src_offsets_length - offset_first_half_length; dst_block->offsets.load(raw_offsets + offset_first_half_length, offsets_second_half_length, min, max); if(dst_block->offsets.getLength() < dst_block->offset_index.getLength() || src_block->offsets.getLength() < src_block->offset_index.getLength()) { LOG(ERROR) << "Block offset length is smaller than offset index length after splitting."; } delete [] raw_ids; delete [] raw_offset_indices; delete [] raw_offsets; } void posting_list_t::upsert(const uint32_t id, const std::vector<uint32_t>& offsets) { // first we will locate the block where `id` should reside block_t* upsert_block; last_id_t before_upsert_last_id; if(id_block_map.empty()) { upsert_block = &root_block; before_upsert_last_id = UINT32_MAX; } else { const auto it = id_block_map.lower_bound(id); upsert_block = (it == id_block_map.end()) ? id_block_map.rbegin()->second : it->second; before_upsert_last_id = upsert_block->ids.last(); } // happy path: upsert_block is not full if(upsert_block->size() < BLOCK_MAX_ELEMENTS) { uint32_t num_inserted = upsert_block->upsert(id, offsets); ids_length += num_inserted; last_id_t after_upsert_last_id = upsert_block->ids.last(); if(before_upsert_last_id != after_upsert_last_id) { id_block_map.erase(before_upsert_last_id); id_block_map.emplace(after_upsert_last_id, upsert_block); } } else { block_t* new_block = new block_t; if(upsert_block->next == nullptr && upsert_block->ids.last() < id) { // appending to the end of the last block where the id will reside on a newly block uint32_t num_inserted = new_block->upsert(id, offsets); ids_length += num_inserted; } else { // upsert and then split block uint32_t num_inserted = upsert_block->upsert(id, offsets); ids_length += num_inserted; // evenly divide elements between both blocks split_block(upsert_block, new_block); last_id_t 
after_upsert_last_id = upsert_block->ids.last(); id_block_map.erase(before_upsert_last_id); id_block_map.emplace(after_upsert_last_id, upsert_block); } last_id_t after_new_block_id = new_block->ids.last(); id_block_map.emplace(after_new_block_id, new_block); new_block->next = upsert_block->next; upsert_block->next = new_block; } } void posting_list_t::dump() { auto it = new_iterator(); std::string ids_str; std::string offset_index_str; std::string offsets_str; while(it.valid()) { auto index = it.index(); while(index < it.block()->size()) { ids_str += std::to_string(it.ids[index]) + ", "; offset_index_str += std::to_string(it.offset_index[index]) + ", "; index++; } auto last_offset_index = it.offset_index[it.block()->size()-1]; for(size_t j = 0; j <= last_offset_index; j++) { offsets_str += std::to_string(it.offsets[j]) + ", "; } it.set_index(it.block()->size()-1); it.next(); } LOG(INFO) << "ids_str:"; LOG(INFO) << ids_str; LOG(INFO) << "offset_index_str:"; LOG(INFO) << offset_index_str; LOG(INFO) << "offsets_str:"; LOG(INFO) << offsets_str; } void posting_list_t::erase(const uint32_t id) { const auto it = id_block_map.lower_bound(id); if(it == id_block_map.end()) { return ; } block_t* erase_block = it->second; last_id_t before_last_id = it->first; uint32_t num_erased = erase_block->erase(id); ids_length -= num_erased; size_t new_ids_length = erase_block->size(); if(new_ids_length == 0) { // happens when the last element of last block is deleted if(erase_block != &root_block) { // since we will be deleting the empty node, set the previous node's next pointer to null std::prev(it)->second->next = nullptr; delete erase_block; } else { // The root block cannot be empty if there are other blocks so we will pull some contents from next block // This is only an issue for blocks with max size of 2 if(root_block.next != nullptr) { auto next_block_last_id = erase_block->next->ids.last(); merge_adjacent_blocks(erase_block, erase_block->next, erase_block->next->size()/2); 
id_block_map.erase(next_block_last_id); id_block_map.emplace(erase_block->next->ids.last(), erase_block->next); id_block_map.emplace(erase_block->ids.last(), erase_block); } } id_block_map.erase(before_last_id); return; } if(new_ids_length >= BLOCK_MAX_ELEMENTS/2 || erase_block->next == nullptr) { last_id_t after_last_id = erase_block->ids.last(); if(before_last_id != after_last_id) { id_block_map.erase(before_last_id); id_block_map.emplace(after_last_id, erase_block); } return ; } // block is less than 50% of max capacity and contains a next node which we can refill from auto next_block = erase_block->next; last_id_t next_block_last_id = next_block->ids.last(); if(erase_block->size() + next_block->size() <= BLOCK_MAX_ELEMENTS) { // we can merge the contents of next block with `erase_block` and delete the next block merge_adjacent_blocks(erase_block, next_block, next_block->size()); erase_block->next = next_block->next; delete next_block; id_block_map.erase(next_block_last_id); } else { // Only part of the next block can be moved over. 
// We will move only 50% of max elements to ensure that we don't end up "flipping" adjacent blocks: // 1, 5 -> 5, 1 size_t num_block2_ids = BLOCK_MAX_ELEMENTS/2; merge_adjacent_blocks(erase_block, next_block, num_block2_ids); // NOTE: we don't have to update `id_block_map` for `next_block` as last element doesn't change } last_id_t after_last_id = erase_block->ids.last(); if(before_last_id != after_last_id) { id_block_map.erase(before_last_id); id_block_map.emplace(after_last_id, erase_block); } } posting_list_t::block_t* posting_list_t::get_root() { return &root_block; } size_t posting_list_t::num_blocks() const { return id_block_map.size(); } uint32_t posting_list_t::first_id() { if(ids_length == 0) { return 0; } return root_block.ids.at(0); } posting_list_t::block_t* posting_list_t::block_of(uint32_t id) { const auto it = id_block_map.lower_bound(id); if(it == id_block_map.end()) { return nullptr; } return it->second; } void posting_list_t::merge(const std::vector<posting_list_t*>& posting_lists, std::vector<uint32_t>& result_ids) { auto its = std::vector<posting_list_t::iterator_t>(); its.reserve(posting_lists.size()); size_t sum_sizes = 0; for(const auto& posting_list: posting_lists) { its.push_back(posting_list->new_iterator()); sum_sizes += posting_list->num_ids(); } if(its.size() == 1) { result_ids.reserve(posting_lists[0]->ids_length); auto it = posting_lists[0]->new_iterator(); while(it.valid()) { result_ids.push_back(it.id()); it.next(); } return ; } result_ids.reserve(sum_sizes); size_t num_lists = its.size(); switch (num_lists) { case 2: while(!at_end2(its)) { if(equals2(its)) { //LOG(INFO) << its[0].id(); result_ids.push_back(its[0].id()); advance_all2(its); } else { uint32_t smallest_value = advance_smallest2(its); result_ids.push_back(smallest_value); } } while(its[0].valid()) { result_ids.push_back(its[0].id()); its[0].next(); } while(its[1].valid()) { result_ids.push_back(its[1].id()); its[1].next(); } break; default: while(!at_end(its)) { 
if(equals(its)) { result_ids.push_back(its[0].id()); advance_all(its); } else { uint32_t smallest_value = advance_smallest(its); result_ids.push_back(smallest_value); } } for(auto& it: its) { while(it.valid()) { result_ids.push_back(it.id()); it.next(); } } } } // Inspired by: https://stackoverflow.com/a/25509185/131050 void posting_list_t::intersect(const std::vector<posting_list_t*>& posting_lists, std::vector<uint32_t>& result_ids) { if(posting_lists.empty()) { return; } if(posting_lists.size() == 1) { result_ids.reserve(posting_lists[0]->ids_length); auto it = posting_lists[0]->new_iterator(); while(it.valid()) { result_ids.push_back(it.id()); it.next(); } return ; } auto its = std::vector<posting_list_t::iterator_t>(); its.reserve(posting_lists.size()); for(const auto& posting_list: posting_lists) { its.push_back(posting_list->new_iterator()); } size_t num_lists = its.size(); switch (num_lists) { case 2: while(!at_end2(its)) { if(equals2(its)) { //LOG(INFO) << its[0].id(); result_ids.push_back(its[0].id()); advance_all2(its); } else { advance_non_largest2(its); } } break; default: while(!at_end(its)) { if(equals(its)) { //LOG(INFO) << its[0].id(); result_ids.push_back(its[0].id()); advance_all(its); } else { advance_non_largest(its); } } } } void posting_list_t::intersect(std::vector<posting_list_t::iterator_t>& posting_list_iterators, bool& is_valid) { if (posting_list_iterators.empty()) { is_valid = false; return; } if (posting_list_iterators.size() == 1) { is_valid = posting_list_iterators.front().valid(); return; } switch (posting_list_iterators.size()) { case 2: while(!at_end2(posting_list_iterators)) { if(equals2(posting_list_iterators)) { is_valid = true; return; } else { advance_non_largest2(posting_list_iterators); } } is_valid = false; break; default: while(!at_end(posting_list_iterators)) { if(equals(posting_list_iterators)) { is_valid = true; return; } else { advance_non_largest(posting_list_iterators); } } is_valid = false; } } bool 
posting_list_t::take_id(result_iter_state_t& istate, uint32_t id) { // decide if this result id should be excluded if(istate.excluded_result_ids_size != 0) { if (std::binary_search(istate.excluded_result_ids, istate.excluded_result_ids + istate.excluded_result_ids_size, id)) { return false; } } // decide if this result be matched with filter results if(istate.filter_ids_length != 0) { return std::binary_search(istate.filter_ids, istate.filter_ids + istate.filter_ids_length, id); } return true; } void posting_list_t::get_offsets(iterator_t& iter, std::vector<uint32_t>& positions) { block_t* curr_block = iter.block(); uint32_t curr_index = iter.index(); if(curr_block == nullptr || curr_index == UINT32_MAX) { return; } uint32_t* offsets = iter.offsets; uint32_t start_offset = iter.offset_index[curr_index]; uint32_t end_offset = (curr_index == curr_block->size() - 1) ? curr_block->offsets.getLength() : iter.offset_index[curr_index + 1]; while(start_offset < end_offset) { int pos = offsets[start_offset]; positions.push_back(pos); ++start_offset; } } bool posting_list_t::get_offsets(const std::vector<iterator_t>& its, std::map<size_t, std::vector<token_positions_t>>& array_token_pos) { // Plain string format: // offset1, offset2, ... , 0 (if token is the last offset for the document) // Array string format: // offset1, ... 
, offsetn, offsetn, array_index, 0 (if token is the last offset for the document) // NOTE 1: last offset is repeated to indicate end of offsets for a given array index) // NOTE 2: offsets are 1-index based (since 0 is used as last offset marking) // For each result ID and for each block it is contained in, calculate offsets size_t id_block_index = 0; for(size_t j = 0; j < its.size(); j++) { block_t* curr_block = its[j].block(); uint32_t curr_index = its[j].index(); if(curr_block == nullptr || curr_index == UINT32_MAX) { continue; } uint32_t* offsets = its[j].offsets; uint32_t start_offset = its[j].offset_index[curr_index]; uint32_t end_offset = (curr_index == curr_block->size() - 1) ? curr_block->offsets.getLength() : its[j].offset_index[curr_index + 1]; std::vector<uint16_t> positions; int prev_pos = -1; bool is_last_token = false; /*LOG(INFO) << "id: " << its[j].id() << ", start_offset: " << start_offset << ", end_offset: " << end_offset; for(size_t x = 0; x < end_offset; x++) { LOG(INFO) << "x: " << x << ", pos: " << offsets[x]; }*/ while(start_offset < end_offset) { int pos = offsets[start_offset]; start_offset++; if(pos == 0) { // indicates that token is the last token on the doc is_last_token = true; start_offset++; continue; } if(pos == prev_pos) { // indicates end of array index if(!positions.empty()) { size_t array_index = (size_t) offsets[start_offset]; is_last_token = false; if(start_offset+1 < end_offset) { size_t next_offset = (size_t) offsets[start_offset + 1]; if(next_offset == 0) { // indicates that token is the last token on the doc is_last_token = true; start_offset++; } } array_token_pos[array_index].push_back(token_positions_t{is_last_token, positions}); positions.clear(); } start_offset++; // skip current value which is the array index or flag for last index prev_pos = -1; continue; } prev_pos = pos; positions.push_back((uint16_t)pos - 1); } if(!positions.empty()) { // for plain string fields 
array_token_pos[0].push_back(token_positions_t{is_last_token, positions}); } } return true; } bool posting_list_t::is_single_token_verbatim_match(const posting_list_t::iterator_t& it, bool field_is_array) { block_t* curr_block = it.block(); uint32_t curr_index = it.index(); if(curr_block == nullptr || curr_index == UINT32_MAX) { return false; } uint32_t* offsets = it.offsets; uint32_t start_offset = it.offset_index[curr_index]; if(!field_is_array && offsets[start_offset] != 1) { // allows us to skip other computes fast return false; } uint32_t end_offset = (curr_index == curr_block->size() - 1) ? curr_block->offsets.getLength() : it.offset_index[curr_index + 1]; if(field_is_array) { int prev_pos = -1; while(start_offset < end_offset) { int pos = offsets[start_offset]; start_offset++; if(pos == prev_pos && pos == 1 && start_offset+1 < end_offset && offsets[start_offset+1] == 0) { return true; } prev_pos = pos; } return false; } else if((end_offset - start_offset) == 2 && offsets[end_offset-1] == 0) { // we've already checked for `offsets[start_offset] == 1` earlier return true; } return false; } bool posting_list_t::at_end(const std::vector<posting_list_t::iterator_t>& its) { // if any one iterator is at end, we can stop for(const auto& it : its) { if(!it.valid()) { return true; } } return false; } bool posting_list_t::at_end2(const std::vector<posting_list_t::iterator_t>& its) { // if any one iterator is at end, we can stop return !its[0].valid() || !its[1].valid(); } bool posting_list_t::equals(std::vector<posting_list_t::iterator_t>& its) { for(int i = 0; i < int(its.size()) - 1; i++) { if(its[i].id() != its[i+1].id()) { return false; } } return true; } bool posting_list_t::equals2(std::vector<posting_list_t::iterator_t>& its) { return its[0].id() == its[1].id(); } posting_list_t::iterator_t posting_list_t::new_iterator(block_t* start_block, block_t* end_block, uint32_t field_id) { start_block = (start_block == nullptr) ? 
&root_block : start_block; return posting_list_t::iterator_t(&id_block_map, start_block, end_block, true, field_id); } posting_list_t::iterator_t posting_list_t::new_rev_iterator() { block_t* start_block = nullptr; if(!id_block_map.empty()) { start_block = id_block_map.rbegin()->second; } auto rev_it = posting_list_t::iterator_t(&id_block_map, start_block, nullptr, true, 0, true); return rev_it; } void posting_list_t::advance_all(std::vector<posting_list_t::iterator_t>& its) { for(auto& it: its) { it.next(); } } void posting_list_t::advance_all2(std::vector<posting_list_t::iterator_t>& its) { its[0].next(); its[1].next(); } void posting_list_t::advance_non_largest(std::vector<posting_list_t::iterator_t>& its) { // we will find the iter with greatest value and then advance the rest until their value catches up uint32_t greatest_value = 0; for(size_t i = 0; i < its.size(); i++) { if(its[i].id() > greatest_value) { greatest_value = its[i].id(); } } for(size_t i = 0; i < its.size(); i++) { if(its[i].id() != greatest_value) { its[i].skip_to(greatest_value); } } } void posting_list_t::advance_non_largest2(std::vector<posting_list_t::iterator_t>& its) { if(its[0].id() > its[1].id()) { its[1].skip_to(its[0].id()); } else { its[0].skip_to(its[1].id()); } } uint32_t posting_list_t::advance_smallest(std::vector<posting_list_t::iterator_t>& its) { // we will advance the iterator(s) with the smallest value and then return that value uint32_t smallest_value = UINT32_MAX; for(size_t i = 0; i < its.size(); i++) { if(its[i].id() < smallest_value) { smallest_value = its[i].id(); } } for(size_t i = 0; i < its.size(); i++) { if(its[i].id() == smallest_value) { its[i].next(); } } return smallest_value; } uint32_t posting_list_t::advance_smallest2(std::vector<posting_list_t::iterator_t>& its) { uint32_t smallest_value = 0; if(its[0].id() < its[1].id()) { smallest_value = its[0].id(); its[0].next(); } else { smallest_value = its[1].id(); its[1].next(); } return smallest_value; } size_t 
posting_list_t::num_ids() const { return ids_length; } bool posting_list_t::contains(uint32_t id) { const auto it = id_block_map.lower_bound(id); if(it == id_block_map.end()) { return false; } block_t* potential_block = it->second; return potential_block->contains(id); } bool posting_list_t::contains_atleast_one(const uint32_t* target_ids, size_t target_ids_size) { posting_list_t::iterator_t it = new_iterator(); size_t target_ids_index = 0; while(target_ids_index < target_ids_size && it.valid()) { uint32_t id = it.id(); if(id == target_ids[target_ids_index]) { return true; } else { // advance smallest value if(id > target_ids[target_ids_index]) { while(target_ids_index < target_ids_size && target_ids[target_ids_index] < id) { target_ids_index++; } } else { it.skip_to(target_ids[target_ids_index]); } } } return false; } bool posting_list_t::is_single_token_prefix_match(const posting_list_t::iterator_t& it, bool field_is_array) { block_t* curr_block = it.block(); uint32_t curr_index = it.index(); if (curr_block == nullptr || curr_index == UINT32_MAX) { return false; } uint32_t* offsets = it.offsets; uint32_t start_offset = it.offset_index[curr_index]; // If the field value starts with the token, it's a match. 
return offsets[start_offset] == 1; } void posting_list_t::get_prefix_matches(std::vector<iterator_t>& its, const bool field_is_array, const uint32_t* ids, const uint32_t num_ids, uint32_t*& prefix_ids, size_t& num_prefix_ids) { size_t prefix_id_index = 0; if (its.size() == 1) { for (size_t i = 0; i < num_ids; i++) { auto const& id = ids[i]; its[0].skip_to(id); if (is_single_token_prefix_match(its[0], field_is_array)) { prefix_ids[prefix_id_index++] = id; } } } else { if (!field_is_array) { for (size_t i = 0; i < num_ids; i++) { uint32_t id = ids[i]; bool is_match = true; for (int j = its.size()-1; j >= 0; j--) { posting_list_t::iterator_t& it = its[j]; it.skip_to(id); block_t* curr_block = it.block(); uint32_t curr_index = it.index(); if (curr_block == nullptr || curr_index == UINT32_MAX) { is_match = false; break; } uint32_t* offsets = it.offsets; uint32_t start_offset_index = it.offset_index[curr_index]; uint32_t end_offset_index = (curr_index == curr_block->size() - 1) ? curr_block->offsets.getLength() : it.offset_index[curr_index + 1]; // looping handles duplicate query tokens, e.g. 
"hip hip hurray hurray" while (start_offset_index < end_offset_index) { uint32_t offset = offsets[start_offset_index]; start_offset_index++; if (offset == (j + 1)) { // we have found a matching index, no need to look further for this token is_match = true; break; } if (offset > (j + 1)) { is_match = false; break; } } if (!is_match) { break; } } if (is_match) { prefix_ids[prefix_id_index++] = id; } } } else { // field is an array struct token_index_meta_t { std::bitset<128> token_index; }; for (size_t i = 0; i < num_ids; i++) { uint32_t id = ids[i]; std::map<size_t, token_index_meta_t> array_index_to_token_index; bool premature_exit = false; for (int j = its.size()-1; j >= 0; j--) { posting_list_t::iterator_t& it = its[j]; it.skip_to(id); block_t* curr_block = it.block(); uint32_t curr_index = it.index(); if (curr_block == nullptr || curr_index == UINT32_MAX) { premature_exit = true; break; } uint32_t* offsets = it.offsets; uint32_t start_offset_index = it.offset_index[curr_index]; uint32_t end_offset_index = (curr_index == curr_block->size() - 1) ? 
curr_block->offsets.getLength() : it.offset_index[curr_index + 1]; int prev_pos = -1; bool found_matching_index = false; size_t num_matching_index = 0; while (start_offset_index < end_offset_index) { int pos = offsets[start_offset_index]; start_offset_index++; if (pos == prev_pos) { // indicates end of array index size_t array_index = (size_t) offsets[start_offset_index]; if (found_matching_index && j+1 < 128) { array_index_to_token_index[array_index].token_index.set(j+1); } start_offset_index++; // skip current value which is the array index or flag for last index prev_pos = -1; found_matching_index = false; continue; } if (pos == (j + 1)) { // we have found a matching index found_matching_index = true; num_matching_index++; } prev_pos = pos; } if (num_matching_index == 0) { // not even a single matching index found: can never be an exact match premature_exit = true; break; } } if (!premature_exit) { // iterate array index to token index to check if atleast 1 array position contains all tokens for (auto& kv: array_index_to_token_index) { if (kv.second.token_index.count() == its.size()) { prefix_ids[prefix_id_index++] = id; break; } } } } } } num_prefix_ids = prefix_id_index; } void posting_list_t::get_exact_matches(std::vector<iterator_t>& its, const bool field_is_array, const uint32_t* ids, const uint32_t num_ids, uint32_t*& exact_ids, size_t& num_exact_ids) { size_t exact_id_index = 0; if(its.size() == 1) { for(size_t i = 0; i < num_ids; i++) { uint32_t id = ids[i]; its[0].skip_to(id); if(is_single_token_verbatim_match(its[0], field_is_array)) { exact_ids[exact_id_index++] = id; } } } else { if(!field_is_array) { for(size_t i = 0; i < num_ids; i++) { uint32_t id = ids[i]; bool is_exact_match = true; for(int j = its.size()-1; j >= 0; j--) { posting_list_t::iterator_t& it = its[j]; it.skip_to(id); block_t* curr_block = it.block(); uint32_t curr_index = it.index(); if(curr_block == nullptr || curr_index == UINT32_MAX) { is_exact_match = false; break; } uint32_t* 
offsets = it.offsets; uint32_t start_offset_index = it.offset_index[curr_index]; uint32_t end_offset_index = (curr_index == curr_block->size() - 1) ? curr_block->offsets.getLength() : it.offset_index[curr_index + 1]; if(j == its.size()-1) { // check if the last query token is the last offset if( offsets[end_offset_index-1] != 0 || (end_offset_index-2 >= 0 && offsets[end_offset_index-2] != its.size())) { // not the last token for the document, so skip is_exact_match = false; break; } } // looping handles duplicate query tokens, e.g. "hip hip hurray hurray" while(start_offset_index < end_offset_index) { uint32_t offset = offsets[start_offset_index]; start_offset_index++; if(offset == (j + 1)) { // we have found a matching index, no need to look further is_exact_match = true; break; } if(offset > (j + 1)) { is_exact_match = false; break; } } if(!is_exact_match) { break; } } if(is_exact_match) { exact_ids[exact_id_index++] = id; } } } else { // field is an array struct token_index_meta_t { std::bitset<128> token_index; bool has_last_token; }; for(size_t i = 0; i < num_ids; i++) { uint32_t id = ids[i]; std::map<size_t, token_index_meta_t> array_index_to_token_index; bool premature_exit = false; for(int j = its.size()-1; j >= 0; j--) { posting_list_t::iterator_t& it = its[j]; it.skip_to(id); block_t* curr_block = it.block(); uint32_t curr_index = it.index(); if(curr_block == nullptr || curr_index == UINT32_MAX) { premature_exit = true; break; } uint32_t* offsets = it.offsets; uint32_t start_offset_index = it.offset_index[curr_index]; uint32_t end_offset_index = (curr_index == curr_block->size() - 1) ? 
curr_block->offsets.getLength() : it.offset_index[curr_index + 1]; int prev_pos = -1; bool has_atleast_one_last_token = false; bool found_matching_index = false; size_t num_matching_index = 0; while(start_offset_index < end_offset_index) { int pos = offsets[start_offset_index]; start_offset_index++; if(pos == prev_pos) { // indicates end of array index size_t array_index = (size_t) offsets[start_offset_index]; if(start_offset_index+1 < end_offset_index) { size_t next_offset = (size_t) offsets[start_offset_index + 1]; if(next_offset == 0 && pos == its.size()) { // indicates that token is the last token on the doc array_index_to_token_index[array_index].has_last_token = true; has_atleast_one_last_token = true; start_offset_index++; } } if(found_matching_index && j+1 < 128) { array_index_to_token_index[array_index].token_index.set(j+1); } start_offset_index++; // skip current value which is the array index or flag for last index prev_pos = -1; found_matching_index = false; continue; } if(pos == (j + 1)) { // we have found a matching index found_matching_index = true; num_matching_index++; } prev_pos = pos; } // check if the last query token is the last offset of ANY array element if(j == its.size()-1 && !has_atleast_one_last_token) { premature_exit = true; break; } if(num_matching_index == 0) { // not even a single matching index found: can never be an exact match premature_exit = true; break; } } if(!premature_exit) { // iterate array index to token index to check if atleast 1 array position contains all tokens for(auto& kv: array_index_to_token_index) { if(kv.second.token_index.count() == its.size() && kv.second.has_last_token) { exact_ids[exact_id_index++] = id; break; } } } } } } num_exact_ids = exact_id_index; } bool posting_list_t::has_prefix_match(std::vector<posting_list_t::iterator_t>& posting_list_iterators, const bool field_is_array) { if (posting_list_iterators.empty()) { return false; } if (posting_list_iterators.size() == 1) { return 
is_single_token_prefix_match(posting_list_iterators[0], field_is_array); } if (!field_is_array) { for (uint32_t i = 0; i < posting_list_iterators.size(); i++) { posting_list_t::iterator_t& it = posting_list_iterators[i]; block_t* curr_block = it.block(); uint32_t curr_index = it.index(); if (curr_block == nullptr || curr_index == UINT32_MAX) { return false; } uint32_t* offsets = it.offsets; uint32_t start_offset_index = it.offset_index[curr_index]; uint32_t end_offset_index = (curr_index == curr_block->size() - 1) ? curr_block->offsets.getLength() : it.offset_index[curr_index + 1]; // looping handles duplicate query tokens, e.g. "hip hip hurray hurray" while (start_offset_index < end_offset_index) { uint32_t offset = offsets[start_offset_index]; start_offset_index++; if (offset == (i + 1)) { // we have found a matching index, no need to look further for this token. break; } if (offset > (i + 1)) { return false; } } } } else { // field is an array struct token_index_meta_t { std::bitset<128> token_index; }; std::map<size_t, token_index_meta_t> array_index_to_token_index; for (int i = posting_list_iterators.size() - 1; i >= 0; i--) { posting_list_t::iterator_t& it = posting_list_iterators[i]; block_t* curr_block = it.block(); uint32_t curr_index = it.index(); if (curr_block == nullptr || curr_index == UINT32_MAX) { return false; } uint32_t* offsets = it.offsets; uint32_t start_offset_index = it.offset_index[curr_index]; uint32_t end_offset_index = (curr_index == curr_block->size() - 1) ? 
curr_block->offsets.getLength() : it.offset_index[curr_index + 1]; int prev_pos = -1; bool found_matching_index = false; size_t num_matching_index = 0; while (start_offset_index < end_offset_index) { int pos = offsets[start_offset_index]; start_offset_index++; if (pos == prev_pos) { // indicates end of array index size_t array_index = (size_t) offsets[start_offset_index]; if (found_matching_index && i+1 < 128) { array_index_to_token_index[array_index].token_index.set(i + 1); } start_offset_index++; // skip current value which is the array index or flag for last index prev_pos = -1; found_matching_index = false; continue; } if (pos == (i + 1)) { // we have found a matching index found_matching_index = true; num_matching_index++; } prev_pos = pos; } if (num_matching_index == 0) { // not even a single matching index found: can never be an exact match return false; } } // iterate array index to token index to check if atleast 1 array position contains all tokens for (auto& kv: array_index_to_token_index) { if (kv.second.token_index.count() == posting_list_iterators.size()) { return true; } } } return true; } bool posting_list_t::has_exact_match(std::vector<posting_list_t::iterator_t>& posting_list_iterators, const bool field_is_array) { if(posting_list_iterators.size() == 1) { return is_single_token_verbatim_match(posting_list_iterators[0], field_is_array); } else { if (!field_is_array) { for (int i = posting_list_iterators.size() - 1; i >= 0; i--) { posting_list_t::iterator_t& it = posting_list_iterators[i]; block_t* curr_block = it.block(); uint32_t curr_index = it.index(); if(curr_block == nullptr || curr_index == UINT32_MAX) { return false; } uint32_t* offsets = it.offsets; uint32_t start_offset_index = it.offset_index[curr_index]; uint32_t end_offset_index = (curr_index == curr_block->size() - 1) ? 
curr_block->offsets.getLength() : it.offset_index[curr_index + 1]; if(i == posting_list_iterators.size() - 1) { // check if the last query token is the last offset if( offsets[end_offset_index-1] != 0 || (end_offset_index-2 >= 0 && offsets[end_offset_index-2] != posting_list_iterators.size())) { // not the last token for the document, so skip return false; } } // looping handles duplicate query tokens, e.g. "hip hip hurray hurray" while(start_offset_index < end_offset_index) { uint32_t offset = offsets[start_offset_index]; start_offset_index++; if(offset == (i + 1)) { // we have found a matching index, no need to look further for this token. break; } if(offset > (i + 1)) { return false; } } } } else { // field is an array struct token_index_meta_t { std::bitset<128> token_index; bool has_last_token; }; std::map<size_t, token_index_meta_t> array_index_to_token_index; for(int i = posting_list_iterators.size() - 1; i >= 0; i--) { posting_list_t::iterator_t& it = posting_list_iterators[i]; block_t* curr_block = it.block(); uint32_t curr_index = it.index(); if(curr_block == nullptr || curr_index == UINT32_MAX) { return false; } uint32_t* offsets = it.offsets; uint32_t start_offset_index = it.offset_index[curr_index]; uint32_t end_offset_index = (curr_index == curr_block->size() - 1) ? 
curr_block->offsets.getLength() : it.offset_index[curr_index + 1]; int prev_pos = -1; bool has_atleast_one_last_token = false; bool found_matching_index = false; size_t num_matching_index = 0; while(start_offset_index < end_offset_index) { int pos = offsets[start_offset_index]; start_offset_index++; if(pos == prev_pos) { // indicates end of array index size_t array_index = (size_t) offsets[start_offset_index]; if(start_offset_index+1 < end_offset_index) { size_t next_offset = (size_t) offsets[start_offset_index + 1]; if(next_offset == 0 && pos == posting_list_iterators.size()) { // indicates that token is the last token on the doc array_index_to_token_index[array_index].has_last_token = true; has_atleast_one_last_token = true; start_offset_index++; } } if(found_matching_index && i+1 < 128) { array_index_to_token_index[array_index].token_index.set(i + 1); } start_offset_index++; // skip current value which is the array index or flag for last index prev_pos = -1; found_matching_index = false; continue; } if(pos == (i + 1)) { // we have found a matching index found_matching_index = true; num_matching_index++; } prev_pos = pos; } // check if the last query token is the last offset of ANY array element if(i == posting_list_iterators.size() - 1 && !has_atleast_one_last_token) { return false; } if(num_matching_index == 0) { // not even a single matching index found: can never be an exact match return false; } } // iterate array index to token index to check if atleast 1 array position contains all tokens for(auto& kv: array_index_to_token_index) { if(kv.second.token_index.count() == posting_list_iterators.size() && kv.second.has_last_token) { return true; } } } } return true; } bool posting_list_t::found_token_sequence(const std::vector<token_positions_t>& token_positions, const size_t token_index, const uint16_t target_pos) { if(token_index == token_positions.size()) { return true; } // iterate through the positions and see if `target_pos` is found in token positions 
const auto& tok_positions = token_positions[token_index].positions; bool found_pos = false; int prev_pos = -1; for(auto tok_pos: tok_positions) { if(tok_pos < prev_pos) { // indicates that the positions are wrapping around found_pos = false; break; } if(tok_pos == target_pos) { found_pos = true; break; } prev_pos = tok_pos; } if(!found_pos) { return false; } return found_token_sequence(token_positions, token_index+1, target_pos+1); } bool posting_list_t::has_phrase_match(const std::vector<token_positions_t>& token_positions) { const auto& positions = token_positions[0].positions; int prev_pos = -1; for(auto pos: positions) { if(pos < prev_pos) { // indicates that the positions are wrapping around return false; } if(found_token_sequence(token_positions, 1, pos + 1)) { return true; } prev_pos = pos; } return false; } void posting_list_t::get_phrase_matches(std::vector<iterator_t>& its, bool field_is_array, const uint32_t* ids, const uint32_t num_ids, uint32_t*& phrase_ids, size_t& num_phrase_ids) { size_t phrase_id_index = 0; if(its.size() == 1) { for(size_t i = 0; i < num_ids; i++) { phrase_ids[phrase_id_index] = ids[i]; phrase_id_index++; } } else { for(size_t i = 0; i < num_ids; i++) { uint32_t id = ids[i]; for (int j = its.size() - 1; j >= 0; j--) { posting_list_t::iterator_t& it = its[j]; it.skip_to(id); } std::map<size_t, std::vector<token_positions_t>> array_token_positions; get_offsets(its, array_token_positions); for(auto& kv: array_token_positions) { const auto& token_positions = kv.second; if(token_positions.size() == its.size() && has_phrase_match(token_positions)) { phrase_ids[phrase_id_index] = ids[i]; phrase_id_index++; break; } } } } num_phrase_ids = phrase_id_index; } void posting_list_t::get_matching_array_indices(uint32_t id, std::vector<iterator_t>& its, std::vector<size_t>& indices) { std::map<size_t, std::bitset<32>> array_index_to_token_index; for(int j = its.size()-1; j >= 0; j--) { posting_list_t::iterator_t& it = its[j]; it.skip_to(id); 
block_t* curr_block = it.block(); uint32_t curr_index = it.index(); if(curr_block == nullptr || curr_index == UINT32_MAX) { return; } uint32_t* offsets = it.offsets; uint32_t start_offset_index = it.offset_index[curr_index]; uint32_t end_offset_index = (curr_index == curr_block->size() - 1) ? curr_block->offsets.getLength() : it.offset_index[curr_index + 1]; int prev_pos = -1; while(start_offset_index < end_offset_index) { int pos = offsets[start_offset_index]; start_offset_index++; if(pos == prev_pos) { // indicates end of array index size_t array_index = (size_t) offsets[start_offset_index]; if(start_offset_index+1 < end_offset_index) { size_t next_offset = (size_t) offsets[start_offset_index + 1]; if(next_offset == 0) { // indicates that token is the last token on the doc start_offset_index++; } } array_index_to_token_index[array_index].set(j+1); start_offset_index++; // skip current value which is the array index or flag for last index prev_pos = -1; continue; } prev_pos = pos; } } // iterate array index to token index to check if atleast 1 array position contains all tokens for(auto& kv: array_index_to_token_index) { if(kv.second.count() == its.size()) { indices.push_back(kv.first); } } } bool posting_list_t::all_ended(const std::vector<posting_list_t::iterator_t>& its) { // if all iterators are at end, we return true for(const auto& it : its) { if(it.valid()) { return false; } } return true; } bool posting_list_t::all_ended2(const std::vector<posting_list_t::iterator_t>& its) { // if both iterators are at end, we return true return !its[0].valid() && !its[1].valid(); } size_t posting_list_t::get_last_offset(const posting_list_t::iterator_t& it, bool field_is_array) { block_t* curr_block = it.block(); uint32_t curr_index = it.index(); uint32_t* offsets = it.offsets; if(curr_block == nullptr || curr_index == UINT32_MAX) { return 0; } uint32_t end_offset = (curr_index == curr_block->size() - 1) ? 
curr_block->offsets.getLength() : it.offset_index[curr_index + 1]; if(field_is_array) { uint32_t start_offset = it.offset_index[curr_index]; int prev_pos = -1; size_t max_offset = 0; while(start_offset < end_offset) { int pos = offsets[start_offset]; start_offset++; if(pos > max_offset) { max_offset = pos; } if(pos == prev_pos) { // indicates end of array index size_t array_index = (size_t) offsets[start_offset]; if(start_offset+1 < end_offset) { size_t next_offset = (size_t) offsets[start_offset + 1]; if(next_offset == 0) { // indicates that token is the last token on the doc start_offset++; } } start_offset++; // skip current value which is the array index or flag for last index prev_pos = -1; continue; } prev_pos = pos; } return max_offset; } else { return offsets[end_offset-1] == 0 ? offsets[end_offset-2] : offsets[end_offset-1]; } return 0; } /* iterator_t operations */ posting_list_t::iterator_t::iterator_t(const std::map<last_id_t, block_t*>* id_block_map, posting_list_t::block_t* start, posting_list_t::block_t* end, bool auto_destroy, uint32_t field_id, bool reverse): id_block_map(id_block_map), curr_block(start), curr_index(0), end_block(end), auto_destroy(auto_destroy), field_id(field_id) { if(curr_block != end_block) { ids = curr_block->ids.uncompress(); offset_index = curr_block->offset_index.uncompress(); offsets = curr_block->offsets.uncompress(); if(reverse) { curr_index = curr_block->ids.getLength()-1; } } } bool posting_list_t::iterator_t::valid() const { return (curr_block != end_block) && (curr_index < curr_block->size()); } void posting_list_t::iterator_t::next() { curr_index++; if(curr_index == curr_block->size()) { curr_index = 0; curr_block = curr_block->next; delete [] ids; delete [] offset_index; delete [] offsets; ids = offset_index = offsets = nullptr; if(curr_block != end_block) { ids = curr_block->ids.uncompress(); offset_index = curr_block->offset_index.uncompress(); offsets = curr_block->offsets.uncompress(); } } } uint32_t 
posting_list_t::iterator_t::last_block_id() const { auto size = curr_block->size(); if(size == 0) { return 0; } return ids[size - 1]; } uint32_t posting_list_t::iterator_t::first_block_id() const { auto size = curr_block->size(); if(size == 0) { return 0; } return ids[0]; } uint32_t posting_list_t::iterator_t::id() const { return ids[curr_index]; } uint32_t posting_list_t::iterator_t::offset() const { return offsets[offset_index[curr_index]]; } uint32_t posting_list_t::iterator_t::index() const { return curr_index; } posting_list_t::block_t* posting_list_t::iterator_t::block() const { return curr_block; } void posting_list_t::iterator_t::skip_to(uint32_t id) { // first look to skip within current block if(id <= this->last_block_id()) { while(curr_index < curr_block->size() && this->id() < id) { curr_index++; } return ; } // identify the block where the id could exist and skip to that reset_cache(); const auto it = id_block_map->lower_bound(id); if(it == id_block_map->end()) { return; } curr_block = it->second; curr_index = 0; ids = curr_block->ids.uncompress(); offset_index = curr_block->offset_index.uncompress(); offsets = curr_block->offsets.uncompress(); while(curr_index < curr_block->size() && this->id() < id) { curr_index++; } if(curr_index == curr_block->size()) { reset_cache(); } } void posting_list_t::iterator_t::skip_to_rev(uint32_t id) { // first look to skip within current block if(id >= this->first_block_id()) { while(curr_index > 0 && this->id() > id) { curr_index--; } return ; } // identify the block where the id could exist and skip to that reset_cache(); const auto it = id_block_map->lower_bound(id); if(it == id_block_map->end()) { return; } curr_block = it->second; curr_index = curr_block->size()-1; ids = curr_block->ids.uncompress(); offset_index = curr_block->offset_index.uncompress(); offsets = curr_block->offsets.uncompress(); while(curr_index > 0 && this->id() > id) { curr_index--; } if(curr_index == UINT32_MAX) { reset_cache(); } } 
posting_list_t::iterator_t::~iterator_t() { if(auto_destroy) { reset_cache(); } } void posting_list_t::iterator_t::reset_cache() { delete [] ids; delete [] offsets; delete [] offset_index; ids = offset_index = offsets = nullptr; curr_index = 0; curr_block = end_block = nullptr; } posting_list_t::iterator_t::iterator_t(iterator_t&& rhs) noexcept { id_block_map = rhs.id_block_map; curr_block = rhs.curr_block; curr_index = rhs.curr_index; end_block = rhs.end_block; ids = rhs.ids; offset_index = rhs.offset_index; offsets = rhs.offsets; auto_destroy = rhs.auto_destroy; field_id = rhs.field_id; rhs.id_block_map = nullptr; rhs.curr_block = nullptr; rhs.end_block = nullptr; rhs.ids = nullptr; rhs.offset_index = nullptr; rhs.offsets = nullptr; } posting_list_t::iterator_t& posting_list_t::iterator_t::operator=(posting_list_t::iterator_t&& rhs) noexcept { id_block_map = rhs.id_block_map; curr_block = rhs.curr_block; curr_index = rhs.curr_index; end_block = rhs.end_block; ids = rhs.ids; offset_index = rhs.offset_index; offsets = rhs.offsets; auto_destroy = rhs.auto_destroy; field_id = rhs.field_id; rhs.id_block_map = nullptr; rhs.curr_block = nullptr; rhs.end_block = nullptr; rhs.ids = nullptr; rhs.offset_index = nullptr; rhs.offsets = nullptr; return *this; } void posting_list_t::iterator_t::set_index(uint32_t index) { curr_index = index; } posting_list_t::iterator_t posting_list_t::iterator_t::clone() const { posting_list_t::iterator_t it(nullptr, nullptr, nullptr); it.id_block_map = id_block_map; it.curr_block = curr_block; it.curr_index = curr_index; it.end_block = end_block; it.ids = ids; it.offsets = offsets; it.offset_index = offset_index; it.auto_destroy = false; it.field_id = field_id; return it; } uint32_t posting_list_t::iterator_t::get_field_id() const { return field_id; } bool result_iter_state_t::is_filter_provided() const { return filter_ids_length > 0 || (fit != nullptr && fit->is_filter_provided()); } bool result_iter_state_t::is_filter_valid() const { if 
(filter_ids_length > 0) { return filter_ids_index < filter_ids_length; } if (fit != nullptr) { return fit->validity == filter_result_iterator_t::valid; } return false; } uint32_t result_iter_state_t::get_filter_id() const { if (filter_ids_length > 0 && filter_ids_index < filter_ids_length) { return filter_ids[filter_ids_index]; } if (fit != nullptr && fit->validity == filter_result_iterator_t::valid) { return fit->seq_id; } return 0; }
74,707
C++
.cpp
1,732
30.77194
123
0.522373
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,788
thread_local_vars.cpp
typesense_typesense/src/thread_local_vars.cpp
#include <cstdint> #include "thread_local_vars.h" thread_local int64_t write_log_index = 0; thread_local uint64_t search_begin_us; thread_local uint64_t search_stop_us; thread_local bool search_cutoff = false;
211
C++
.cpp
6
34
41
0.789216
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,789
http_client.cpp
typesense_typesense/src/http_client.cpp
#include "http_client.h" #include "file_utils.h" #include "logger.h" #include <vector> #include <json.hpp> std::string HttpClient::api_key = ""; std::string HttpClient::ca_cert_path = ""; struct client_state_t: public req_state_t { CURL* curl; client_state_t(CURL* curl): curl(curl) { } }; long HttpClient::post_response(const std::string &url, const std::string &body, std::string &response, std::map<std::string, std::string>& res_headers, const std::unordered_map<std::string, std::string>& headers, long timeout_ms, bool send_ts_api_header) { CURL *curl = init_curl(url, response, timeout_ms); if(curl == nullptr) { return 500; } curl_easy_setopt(curl, CURLOPT_POSTFIELDS, body.c_str()); struct curl_slist *chunk = nullptr; for(const auto& header: headers) { std::string header_str = header.first + ": " + header.second; chunk = curl_slist_append(chunk, header_str.c_str()); } return perform_curl(curl, res_headers, chunk, send_ts_api_header); } long HttpClient::post_response_stream(const std::string &url, const std::string &body, async_stream_response_t &response, std::map<std::string, std::string>& res_headers, const std::unordered_map<std::string, std::string>& headers, long timeout_ms) { struct curl_slist* chunk = nullptr; CURL *curl = init_curl_stream(url, response, timeout_ms); if(curl == nullptr) { return 500; } for(const auto& header: headers) { std::string header_str = header.first + ": " + header.second; chunk = curl_slist_append(chunk, header_str.c_str()); } curl_easy_setopt(curl, CURLOPT_HTTPHEADER, chunk); curl_easy_setopt(curl, CURLOPT_POST, 1L); curl_easy_setopt(curl, CURLOPT_POSTFIELDS, body.c_str()); curl_easy_perform(curl); long status_code = 0; curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &status_code); curl_easy_cleanup(curl); curl_slist_free_all(chunk); return status_code; } long HttpClient::post_response_async(const std::string &url, const std::shared_ptr<http_req> request, const std::shared_ptr<http_res> response, HttpServer* server, bool 
send_ts_api_header) { deferred_req_res_t* req_res = new deferred_req_res_t(request, response, server, false); std::unique_ptr<deferred_req_res_t> req_res_guard(req_res); struct curl_slist* chunk = nullptr; CURL *curl = init_curl_async(url, req_res, chunk, send_ts_api_header); if(curl == nullptr) { return 500; } curl_easy_setopt(curl, CURLOPT_POST, 1L); curl_easy_perform(curl); curl_easy_cleanup(curl); curl_slist_free_all(chunk); return 0; } long HttpClient::put_response(const std::string &url, const std::string &body, std::string &response, std::map<std::string, std::string>& res_headers, long timeout_ms, bool send_ts_api_header) { CURL *curl = init_curl(url, response, timeout_ms); if(curl == nullptr) { return 500; } curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "PUT"); curl_easy_setopt(curl, CURLOPT_POSTFIELDS, body.c_str()); return perform_curl(curl, res_headers, nullptr, send_ts_api_header); } long HttpClient::patch_response(const std::string &url, const std::string &body, std::string &response, std::map<std::string, std::string>& res_headers, long timeout_ms, bool send_ts_api_header) { CURL *curl = init_curl(url, response, timeout_ms); if(curl == nullptr) { return 500; } curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "PATCH"); curl_easy_setopt(curl, CURLOPT_POSTFIELDS, body.c_str()); return perform_curl(curl, res_headers, nullptr, send_ts_api_header); } long HttpClient::delete_response(const std::string &url, std::string &response, std::map<std::string, std::string>& res_headers, long timeout_ms, bool send_ts_api_header) { CURL *curl = init_curl(url, response, timeout_ms); if(curl == nullptr) { return 500; } curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "DELETE"); return perform_curl(curl, res_headers, nullptr, send_ts_api_header); } long HttpClient::get_response(const std::string &url, std::string &response, std::map<std::string, std::string>& res_headers, const std::unordered_map<std::string, std::string>& headers, long timeout_ms, bool send_ts_api_header) { 
CURL *curl = init_curl(url, response, timeout_ms); if(curl == nullptr) { return 500; } struct curl_slist *chunk = nullptr; for(const auto& header: headers) { std::string header_str = header.first + ": " + header.second; chunk = curl_slist_append(chunk, header_str.c_str()); } // follow redirects curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L); return perform_curl(curl, res_headers, chunk, send_ts_api_header); } void HttpClient::init(const std::string &api_key) { HttpClient::api_key = api_key; // try to locate ca cert file (from: https://serverfault.com/a/722646/117601) std::vector<std::string> locations = { "/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc. "/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL 6 "/etc/ssl/ca-bundle.pem", // OpenSUSE "/etc/pki/tls/cacert.pem", // OpenELEC "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", // CentOS/RHEL 7 "/usr/local/etc/openssl/cert.pem", // OSX "/usr/local/etc/openssl@1.1/cert.pem", // OSX }; HttpClient::ca_cert_path = ""; for(const std::string & location: locations) { if(file_exists(location)) { HttpClient::ca_cert_path = location; break; } } } long HttpClient::perform_curl(CURL *curl, std::map<std::string, std::string>& res_headers, struct curl_slist *chunk, bool send_ts_api_header) { if(send_ts_api_header) { std::string api_key_header = std::string("x-typesense-api-key: ") + HttpClient::api_key; chunk = curl_slist_append(chunk, api_key_header.c_str()); } curl_easy_setopt(curl, CURLOPT_HTTPHEADER, chunk); CURLcode res = curl_easy_perform(curl); if (res != CURLE_OK) { char* url = nullptr; char *method = nullptr; curl_easy_getinfo(curl, CURLINFO_EFFECTIVE_URL, &url); curl_easy_getinfo(curl, CURLINFO_EFFECTIVE_METHOD, &method); long status_code = 0; if(res == CURLE_OPERATION_TIMEDOUT) { double total_time; curl_easy_getinfo(curl, CURLINFO_TOTAL_TIME, &total_time); LOG(ERROR) << "CURL timeout. 
Time taken: " << total_time << ", method: " << method << ", url: " << url; status_code = 408; } else { LOG(ERROR) << "CURL failed. Code: " << res << ", strerror: " << curl_easy_strerror(res) << ", method: " << method << ", url: " << url; status_code = 500; } curl_easy_cleanup(curl); curl_slist_free_all(chunk); return status_code; } long http_code = 500; curl_easy_getinfo (curl, CURLINFO_RESPONSE_CODE, &http_code); extract_response_headers(curl, res_headers); curl_easy_cleanup(curl); curl_slist_free_all(chunk); return http_code == 0 ? 500 : http_code; } void HttpClient::extract_response_headers(CURL* curl, std::map<std::string, std::string> &res_headers) { char* content_type; CURLcode res = curl_easy_getinfo (curl, CURLINFO_CONTENT_TYPE, &content_type); if(res == CURLE_OK && content_type != nullptr) { res_headers.emplace("content-type", content_type); } } size_t HttpClient::curl_req_send_callback(char* buffer, size_t size, size_t nitems, void* userdata) { //LOG(INFO) << "curl_req_send_callback"; // callback for request body to be sent to remote host deferred_req_res_t* req_res = static_cast<deferred_req_res_t *>(userdata); if(!req_res->res->is_alive) { // underlying client request is dead, don't proxy anymore data to upstream (leader) //LOG(INFO) << "req_res->req->req is: null"; return 0; } size_t max_req_bytes = (size * nitems); const char* total_body_buf = req_res->req->body.c_str(); size_t available_body_bytes = (req_res->req->body.size() - req_res->req->body_index); // copy data into `buffer` not exceeding max_req_bytes size_t bytes_to_read = std::min(max_req_bytes, available_body_bytes); memcpy(buffer, total_body_buf + req_res->req->body_index, bytes_to_read); req_res->req->body_index += bytes_to_read; /*LOG(INFO) << "Wrote " << bytes_to_read << " bytes to request body (max_buffer_bytes=" << max_req_bytes << ")"; LOG(INFO) << "req_res->req->body_index: " << req_res->req->body_index << ", req_res->req->body.size(): " << req_res->req->body.size();*/ 
if(req_res->req->body_index == req_res->req->body.size()) { //LOG(INFO) << "Current body buffer has been consumed fully."; req_res->req->body_index = 0; req_res->req->body = ""; HttpServer *server = req_res->server; server->get_message_dispatcher()->send_message(HttpServer::REQUEST_PROCEED_MESSAGE, req_res); if(!req_res->req->last_chunk_aggregate) { //LOG(INFO) << "Waiting for request body to be ready"; req_res->req->wait(); //LOG(INFO) << "Request body is ready"; //LOG(INFO) << "Buffer refilled, unpausing request forwarding, body_size=" << req_res->req->body.size(); } } return bytes_to_read; } size_t HttpClient::curl_write_async(char *buffer, size_t size, size_t nmemb, void *context) { // callback for response body to be sent back to client //LOG(INFO) << "curl_write_async"; deferred_req_res_t* req_res = static_cast<deferred_req_res_t *>(context); if(!req_res->res->is_alive) { // underlying client request is dead, don't try to send anymore data return 0; } size_t res_size = size * nmemb; // set headers if not already set if(req_res->res->status_code == 0) { client_state_t* client_state = dynamic_cast<client_state_t*>(req_res->req->data); CURL* curl = client_state->curl; long http_code = 500; CURLcode res = curl_easy_getinfo (curl, CURLINFO_RESPONSE_CODE, &http_code); if(res == CURLE_OK) { req_res->res->status_code = http_code; } char* content_type; res = curl_easy_getinfo (curl, CURLINFO_CONTENT_TYPE, &content_type); if(res == CURLE_OK && content_type != nullptr) { req_res->res->content_type_header = content_type; } } // we've got response from remote host: write to client and ask for more request body req_res->res->body = std::string(buffer, res_size); req_res->res->final = false; //LOG(INFO) << "curl_write_async response, res body size: " << req_res->res->body.size(); // wait for previous chunk to finish (if any) //LOG(INFO) << "Waiting on req_res " << req_res->res; req_res->res->wait(); async_req_res_t* async_req_res = new async_req_res_t(req_res->req, 
req_res->res, true); req_res->server->get_message_dispatcher()->send_message(HttpServer::STREAM_RESPONSE_MESSAGE, async_req_res); // wait until response is sent //LOG(INFO) << "Response sent"; return res_size; } size_t HttpClient::curl_write_stream(char *buffer, size_t size, size_t nmemb, void *context) { size_t res_size = size * nmemb; auto res = reinterpret_cast<async_stream_response_t*>(context); res->response_chunks.emplace_back(std::string(buffer, res_size)); return res_size; } size_t HttpClient::curl_write_stream_done(void *context, curl_socket_t item) { auto res = reinterpret_cast<async_stream_response_t*>(context); std::unique_lock<std::mutex> lock(res->mutex); res->ready = true; res->cv.notify_one(); close(item); return 0; } size_t HttpClient::curl_write_async_done(void *context, curl_socket_t item) { //LOG(INFO) << "curl_write_async_done"; deferred_req_res_t* req_res = static_cast<deferred_req_res_t *>(context); req_res->server->decr_pending_writes(); if(!req_res->res->is_alive) { // underlying client request is dead, don't try to send anymore data // also, close the socket as we've overridden the close socket handler! close(item); return 0; } req_res->res->body = ""; req_res->res->final = true; // wait until final response is flushed or response object will be destroyed by caller //LOG(INFO) << "Waiting on req_res " << req_res->res; req_res->res->wait(); async_req_res_t* async_req_res = new async_req_res_t(req_res->req, req_res->res, true); req_res->server->get_message_dispatcher()->send_message(HttpServer::STREAM_RESPONSE_MESSAGE, async_req_res); // Close the socket as we've overridden the close socket handler! 
close(item); return 0; } CURL *HttpClient::init_curl_stream(const std::string& url, async_stream_response_t& res, long timeout_ms) { CURL* curl = curl_easy_init(); if(!ca_cert_path.empty()) { curl_easy_setopt(curl, CURLOPT_CAINFO, ca_cert_path.c_str()); } else { LOG(WARNING) << "Unable to locate system SSL certificates."; } curl_easy_setopt(curl, CURLOPT_URL, url.c_str()); curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT_MS, 4000); curl_easy_setopt(curl, CURLOPT_TIMEOUT_MS, timeout_ms); curl_easy_setopt(curl, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE); // to allow self-signed certs curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0L); curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 0L); curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, HttpClient::curl_write_stream); curl_easy_setopt(curl, CURLOPT_WRITEDATA, &res); curl_easy_setopt(curl, CURLOPT_CLOSESOCKETFUNCTION, HttpClient::curl_write_stream_done); curl_easy_setopt(curl, CURLOPT_CLOSESOCKETDATA, &res); return curl; } CURL *HttpClient::init_curl_async(const std::string& url, deferred_req_res_t* req_res, curl_slist*& chunk, bool send_ts_api_header) { CURL *curl = curl_easy_init(); if(curl == nullptr) { return nullptr; } req_res->req->data = new client_state_t(curl); // destruction of data is managed by req destructor if(send_ts_api_header) { std::string api_key_header = std::string("x-typesense-api-key: ") + HttpClient::api_key; chunk = curl_slist_append(chunk, api_key_header.c_str()); } // set content length std::string content_length_header = std::string("content-length: ") + std::to_string(req_res->req->_req->content_length); chunk = curl_slist_append(chunk, content_length_header.c_str()); curl_easy_setopt(curl, CURLOPT_HTTPHEADER, chunk); // Enabling this causes issues in mixed mode: client using http/1 but follower -> leader using http/2 //curl_easy_setopt(curl, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE); // callback called every time request body is needed curl_easy_setopt(curl, 
CURLOPT_READFUNCTION, HttpClient::curl_req_send_callback); // context to callback curl_easy_setopt(curl, CURLOPT_READDATA, (void *)req_res); if(!ca_cert_path.empty()) { curl_easy_setopt(curl, CURLOPT_CAINFO, ca_cert_path.c_str()); } else { LOG(WARNING) << "Unable to locate system SSL certificates."; } curl_easy_setopt(curl, CURLOPT_URL, url.c_str()); curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT_MS, 4000); // to allow self-signed certs curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0L); curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 0L); curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, HttpClient::curl_write_async); curl_easy_setopt(curl, CURLOPT_WRITEDATA, req_res); curl_easy_setopt(curl, CURLOPT_CLOSESOCKETFUNCTION, HttpClient::curl_write_async_done); curl_easy_setopt(curl, CURLOPT_CLOSESOCKETDATA, req_res); return curl; } CURL *HttpClient::init_curl(const std::string& url, std::string& response, const size_t timeout_ms) { CURL *curl = curl_easy_init(); if(curl == nullptr) { nlohmann::json res; res["message"] = "Failed to initialize HTTP client."; response = res.dump(); return nullptr; } if(!ca_cert_path.empty()) { curl_easy_setopt(curl, CURLOPT_CAINFO, ca_cert_path.c_str()); } else { LOG(WARNING) << "Unable to locate system SSL certificates."; } curl_easy_setopt(curl, CURLOPT_URL, url.c_str()); curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT_MS, 4000); curl_easy_setopt(curl, CURLOPT_TIMEOUT_MS, timeout_ms); curl_easy_setopt(curl, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE); // to allow self-signed certs curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0L); curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 0L); curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, HttpClient::curl_write); curl_easy_setopt(curl, CURLOPT_WRITEDATA, &response); return curl; } size_t HttpClient::curl_write(char *contents, size_t size, size_t nmemb, std::string *s) { s->append(contents, size*nmemb); return size*nmemb; } size_t HttpClient::curl_write_download(void *ptr, size_t size, 
size_t nmemb, FILE *stream) { size_t written = fwrite(ptr, size, nmemb, stream); return written; } long HttpClient::download_file(const std::string& url, const std::string& file_path) { CURL *curl = curl_easy_init(); if(curl == nullptr) { return -1; } FILE *fp = fopen(file_path.c_str(), "wb"); if(fp == nullptr) { LOG(ERROR) << "Unable to open file for writing: " << file_path; return -1; } curl_easy_setopt(curl, CURLOPT_URL, url.c_str()); curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT_MS, 4000); curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 0L); curl_easy_setopt(curl, CURLOPT_WRITEDATA, fp); curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, curl_write_download); // follow redirects curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L); CURLcode res_code = curl_easy_perform(curl); if(res_code != CURLE_OK) { LOG(ERROR) << "Unable to download file: " << url << " to " << file_path << " - " << curl_easy_strerror(res_code); return -1; } long http_code = 0; curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &http_code); curl_easy_cleanup(curl); fclose(fp); return http_code; }
19,185
C++
.cpp
400
40.715
125
0.634034
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,790
analytics_manager.cpp
typesense_typesense/src/analytics_manager.cpp
#include <mutex> #include <thread> #include "analytics_manager.h" #include "tokenizer.h" #include "http_client.h" #include "collection_manager.h" #include "string_utils.h" #define EVENTS_RATE_LIMIT_SEC 60 Option<bool> AnalyticsManager::create_rule(nlohmann::json& payload, bool upsert, bool write_to_disk) { if(!payload.contains("type") || !payload["type"].is_string()) { return Option<bool>(400, "Request payload contains invalid type."); } if(!payload.contains("name") || !payload["name"].is_string()) { return Option<bool>(400, "Bad or missing name."); } if(!payload.contains("params") || !payload["params"].is_object()) { return Option<bool>(400, "Bad or missing params."); } if(payload["type"] == POPULAR_QUERIES_TYPE || payload["type"] == NOHITS_QUERIES_TYPE || payload["type"] == COUNTER_TYPE || payload["type"] == LOG_TYPE) { return create_index(payload, upsert, write_to_disk); } return Option<bool>(400, "Invalid type."); } Option<bool> AnalyticsManager::create_index(nlohmann::json &payload, bool upsert, bool write_to_disk) { // params and name are validated upstream const std::string& suggestion_config_name = payload["name"].get<std::string>(); bool already_exists = suggestion_configs.find(suggestion_config_name) != suggestion_configs.end(); if(!upsert && already_exists) { return Option<bool>(400, "There's already another configuration with the name `" + suggestion_config_name + "`."); } const auto& params = payload["params"]; if(!params.contains("source") || !params["source"].is_object()) { return Option<bool>(400, "Bad or missing source."); } size_t limit = 1000; bool expand_query = false; bool enable_auto_aggregation = true; if(params.contains("limit") && params["limit"].is_number_integer()) { limit = params["limit"].get<size_t>(); } if(params.contains("expand_query") && params["expand_query"].is_boolean()) { expand_query = params["expand_query"].get<bool>(); } std::string counter_field; std::string destination_collection; std::vector<std::string> src_collections; 
suggestion_config_t suggestion_config; suggestion_config.name = suggestion_config_name; suggestion_config.limit = limit; suggestion_config.expand_query = expand_query; suggestion_config.rule_type = payload["type"]; //for all types source collection is needed. if(!params["source"].contains("collections") || !params["source"]["collections"].is_array()) { return Option<bool>(400, "Must contain a valid list of source collections."); } else { for(const auto& coll: params["source"]["collections"]) { if (!coll.is_string()) { return Option<bool>(400, "Source collections value should be a string."); } auto collection = CollectionManager::get_instance().get_collection(coll.get<std::string>()); if (collection == nullptr) { return Option<bool>(404, "Collection `" + coll.get<std::string>() + "` is not found"); } const std::string &src_collection = coll.get<std::string>(); src_collections.push_back(src_collection); destination_collection = src_collection; } } if((payload["type"] == POPULAR_QUERIES_TYPE || payload["type"] == NOHITS_QUERIES_TYPE) && (params.contains("enable_auto_aggregation"))) { if(!params["enable_auto_aggregation"].is_boolean()) { return Option<bool>(400, "enable_auto_aggregation should be boolean."); } enable_auto_aggregation = params["enable_auto_aggregation"]; } bool valid_events_found = params["source"].contains("events") && !params["source"]["events"].empty() && params["source"]["events"].is_array() && params["source"]["events"][0].is_object(); if(valid_events_found) { suggestion_config.events = params["source"]["events"]; } else if(payload["type"] == LOG_TYPE || payload["type"] == COUNTER_TYPE) { //events array is mandatory for LOG and COUNTER EVENTS return Option<bool>(400, "Bad or missing events."); } if(payload["type"] != LOG_TYPE) { if(!params.contains("destination") || !params["destination"].is_object()) { return Option<bool>(400, "Bad or missing destination."); } if(!params["destination"].contains("collection") || 
!params["destination"]["collection"].is_string()) { return Option<bool>(400, "Must contain a valid destination collection."); } if(params["destination"].contains("counter_field")) { if(!params["destination"]["counter_field"].is_string()) { return Option<bool>(400, "Must contain a valid counter_field."); } counter_field = params["destination"]["counter_field"].get<std::string>(); suggestion_config.counter_field = counter_field; } destination_collection = params["destination"]["collection"].get<std::string>(); } if(payload["type"] == POPULAR_QUERIES_TYPE) { if(!upsert && popular_queries.count(destination_collection) != 0) { return Option<bool>(400, "There's already another configuration for this destination collection."); } } else if(payload["type"] == NOHITS_QUERIES_TYPE) { if(!upsert && nohits_queries.count(destination_collection) != 0) { return Option<bool>(400, "There's already another configuration for this destination collection."); } } else if(payload["type"] == COUNTER_TYPE) { if(!upsert && counter_events.count(destination_collection) != 0) { return Option<bool>(400, "There's already another configuration for this destination collection."); } auto coll = CollectionManager::get_instance().get_collection(destination_collection).get(); if(coll != nullptr) { if(!coll->contains_field(counter_field)) { return Option<bool>(404, "counter_field `" + counter_field + "` not found in destination collection."); } } else { return Option<bool>(404, "Collection `" + destination_collection + "` not found."); } } std::unique_lock lock(mutex); if(already_exists) { // remove the previous configuration with same name (upsert) Option<bool> remove_op = remove_index(suggestion_config_name); if(!remove_op.ok()) { return Option<bool>(500, "Error erasing the existing configuration.");; } } if(query_collection_events.count(destination_collection) == 0) { std::vector<event_t> vec; query_collection_events.emplace(destination_collection, vec); } std::map<std::string, uint16_t> 
event_weight_map; bool log_to_store = payload["type"] == LOG_TYPE; for (const std::string coll: src_collections) { if(query_collection_events.count(coll) == 0) { std::vector<event_t> vec; query_collection_events.emplace(coll, vec); } } if(payload["type"] == POPULAR_QUERIES_TYPE) { QueryAnalytics* popularQueries = new QueryAnalytics(limit, enable_auto_aggregation); popularQueries->set_expand_query(suggestion_config.expand_query); popular_queries.emplace(destination_collection, popularQueries); } else if(payload["type"] == NOHITS_QUERIES_TYPE) { QueryAnalytics* noresultsQueries = new QueryAnalytics(limit, enable_auto_aggregation); nohits_queries.emplace(destination_collection, noresultsQueries); } if(valid_events_found) { for(const auto& event: params["source"]["events"]) { if(!event.contains("name") || event_collection_map.count(event["name"]) != 0) { return Option<bool>(400, "Events must contain a unique name."); } bool event_log_to_store = false; if(payload["type"] == COUNTER_TYPE) { if(!event.contains("weight") || !event["weight"].is_number()) { return Option<bool>(400, "Counter events must contain a weight value."); } event_weight_map[event["name"]] = event["weight"]; } if(event.contains("log_to_store")) { event_log_to_store = event["log_to_store"].get<bool>(); if(event_log_to_store && !analytics_store) { return Option<bool>(400, "Event can't be logged when analytics-db is not defined."); } } event_type_collection ec{event["type"], destination_collection, src_collections, event_log_to_store || log_to_store, suggestion_config_name}; //keep pointer for /events API if(payload["type"] == POPULAR_QUERIES_TYPE) { ec.queries_ptr = popular_queries.at(destination_collection); } else if(payload["type"] == NOHITS_QUERIES_TYPE) { ec.queries_ptr = nohits_queries.at(destination_collection); } event_collection_map.emplace(event["name"], ec); } //store counter events data if(payload["type"] == COUNTER_TYPE) { counter_events.emplace(destination_collection, 
counter_event_t{counter_field, {}, event_weight_map}); } } suggestion_config.destination_collection = destination_collection; suggestion_config.src_collections = src_collections; suggestion_configs.emplace(suggestion_config_name, suggestion_config); for(const auto& query_coll: suggestion_config.src_collections) { query_collection_mapping[query_coll].push_back(destination_collection); } if(write_to_disk) { auto suggestion_key = std::string(ANALYTICS_RULE_PREFIX) + "_" + suggestion_config_name; bool inserted = store->insert(suggestion_key, payload.dump()); if(!inserted) { return Option<bool>(500, "Error while storing the config to disk."); } } return Option<bool>(true); } AnalyticsManager::~AnalyticsManager() { std::unique_lock lock(mutex); for(auto& kv: popular_queries) { delete kv.second; } for(auto& kv: nohits_queries) { delete kv.second; } } Option<nlohmann::json> AnalyticsManager::list_rules() { std::unique_lock lock(mutex); nlohmann::json rules = nlohmann::json::object(); rules["rules"]= nlohmann::json::array(); for(const auto& suggestion_config: suggestion_configs) { nlohmann::json rule; suggestion_config.second.to_json(rule); rules["rules"].push_back(rule); } return Option<nlohmann::json>(rules); } Option<nlohmann::json> AnalyticsManager::get_rule(const std::string& name) { nlohmann::json rule; std::unique_lock lock(mutex); auto suggestion_config_it = suggestion_configs.find(name); if(suggestion_config_it == suggestion_configs.end()) { return Option<nlohmann::json>(404, "Rule not found."); } suggestion_config_it->second.to_json(rule); return Option<nlohmann::json>(rule); } Option<bool> AnalyticsManager::remove_rule(const std::string &name) { std::unique_lock lock(mutex); auto suggestion_configs_it = suggestion_configs.find(name); if(suggestion_configs_it != suggestion_configs.end()) { return remove_index(name); } return Option<bool>(404, "Rule not found."); } Option<bool> AnalyticsManager::remove_all_rules() { std::unique_lock lock(mutex); 
std::vector<std::string> rules_list; //populate rules to delete later for(const auto& suggestion_config_it : suggestion_configs) { rules_list.emplace_back(suggestion_config_it.first); } for(const auto& rule : rules_list) { remove_index(rule); } return Option<bool>(true); } Option<bool> AnalyticsManager::remove_index(const std::string &name) { // lock is held by caller auto suggestion_configs_it = suggestion_configs.find(name); if(suggestion_configs_it == suggestion_configs.end()) { return Option<bool>(404, "Rule not found."); } const auto& suggestion_collection = suggestion_configs_it->second.destination_collection; for(const auto& query_collection: suggestion_configs_it->second.src_collections) { query_collection_mapping.erase(query_collection); } if(popular_queries.count(suggestion_collection) != 0) { delete popular_queries[suggestion_collection]; popular_queries.erase(suggestion_collection); } if(nohits_queries.count(suggestion_collection) != 0) { delete nohits_queries[suggestion_collection]; nohits_queries.erase(suggestion_collection); } if(counter_events.count(suggestion_collection) != 0) { counter_events.erase(suggestion_collection); } if(query_collection_events.count(suggestion_collection) != 0) { query_collection_events.erase(suggestion_collection); } suggestion_configs.erase(name); //remove corresponding events with rule for(auto it = event_collection_map.begin(); it != event_collection_map.end();) { if(it->second.analytic_rule == name) { event_collection_map.erase(it++); } else { ++it; } } auto suggestion_key = std::string(ANALYTICS_RULE_PREFIX) + "_" + name; bool erased = store->remove(suggestion_key); if(!erased) { return Option<bool>(500, "Error while deleting from disk."); } return Option<bool>(true); } void AnalyticsManager::add_suggestion(const std::string &query_collection, const std::string& query, const std::string& expanded_query, const bool live_query, const std::string& user_id) { // look up suggestion collections for the query collection 
std::unique_lock lock(mutex); const auto& suggestion_collections_it = query_collection_mapping.find(query_collection); if(suggestion_collections_it != query_collection_mapping.end()) { for(const auto& suggestion_collection: suggestion_collections_it->second) { const auto& popular_queries_it = popular_queries.find(suggestion_collection); if(popular_queries_it != popular_queries.end() && popular_queries_it->second->is_auto_aggregation_enabled()) { popular_queries_it->second->add(query, expanded_query, live_query, user_id); } } } } Option<bool> AnalyticsManager::add_event(const std::string& client_ip, const std::string& event_type, const std::string& event_name, const nlohmann::json& event_json) { std::unique_lock lock(mutex); const auto event_collection_map_it = event_collection_map.find(event_name); if(event_collection_map_it == event_collection_map.end()) { return Option<bool>(404, "No analytics rule defined for event name " + event_name); } if(event_collection_map_it->second.event_type != event_type) { return Option<bool>(400, "event_type mismatch in analytic rules."); } std::string destination_collection = event_collection_map_it->second.destination_collection; std::vector<std::string> src_collections = event_collection_map_it->second.src_collections; std::string src_collection; if (!event_json.contains("collection") && src_collections.size() == 1) { src_collection = src_collections[0]; } else if(!event_json.contains("collection") && src_collections.size() > 1) { return Option<bool>(400, "Multiple source collections. 
'collection' should be specified"); } else if (event_json.contains("collection")) { if(std::find(src_collections.begin(), src_collections.end(), event_json["collection"]) == src_collections.end()) { return Option<bool>(400, event_json["collection"].get<std::string>() + " not found in the rule " + event_name); } src_collection = event_json["collection"]; } const auto& query_collection_events_it = query_collection_events.find(src_collection); if(query_collection_events_it != query_collection_events.end()) { auto &events_vec = query_collection_events_it->second; #ifdef TEST_BUILD if (isRateLimitEnabled) { #endif auto now_ts_seconds = std::chrono::duration_cast<std::chrono::seconds>( std::chrono::system_clock::now().time_since_epoch()).count(); auto events_cache_it = events_cache.find(client_ip); if (events_cache_it != events_cache.end()) { // event found in events cache if ((now_ts_seconds - events_cache_it->second.last_update_time) < EVENTS_RATE_LIMIT_SEC) { if (events_cache_it->second.count >= analytics_minute_rate_limit) { return Option<bool>(500, "event rate limit reached."); } else { events_cache_it->second.count++; } } else { events_cache_it->second.last_update_time = now_ts_seconds; events_cache_it->second.count = 1; } } else { event_cache_t eventCache{(uint64_t) now_ts_seconds, 1}; events_cache.insert(client_ip, eventCache); } #ifdef TEST_BUILD } #endif auto now_ts_useconds = std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::system_clock::now().time_since_epoch()).count(); std::string query; std::string user_id; std::string doc_id; std::vector<std::pair<std::string, std::string>> custom_data; if(event_type == SEARCH_EVENT) { query = event_json["q"].get<std::string>(); user_id = event_json["user_id"].get<std::string>(); //add to respective popular queries/nohits queries if(event_collection_map_it->second.queries_ptr) { event_collection_map_it->second.queries_ptr->add(query, query, false, user_id); } else { return Option<bool>(500, "Error in 
/events endpoint for event " + event_name); } } else if (event_type == CUSTOM_EVENT) { for(auto itr = event_json.begin(); itr != event_json.end(); ++itr) { if (itr.key() == "query") { query = itr.value().get<std::string>(); } else if (itr.key()== "user_id") { user_id = itr.value().get<std::string>(); } else if (itr.key() == "doc_id") { doc_id = itr.value().get<std::string>(); } else { auto kv = std::make_pair(itr.key(), itr.value().get<std::string>()); custom_data.push_back(kv); } } } else { query = event_json.contains("q") ? event_json["q"].get<std::string>() : ""; user_id = event_json.contains("user_id") ? event_json["user_id"].get<std::string>() : ""; doc_id = event_json["doc_id"].get<std::string>(); } if(event_collection_map_it->second.log_to_store) { //only store events if log_to_store is specified in rule //remove any '%' found in userid user_id.erase(std::remove(user_id.begin(), user_id.end(), '%'), user_id.end()); event_t event(query, event_type, now_ts_useconds, user_id, doc_id, event_name, event_collection_map[event_name].log_to_store, custom_data); events_vec.emplace_back(event); } if (!counter_events.empty()) { auto counter_events_it = counter_events.find(destination_collection); if (counter_events_it != counter_events.end()) { auto event_weight_map_it = counter_events_it->second.event_weight_map.find(event_name); if (event_weight_map_it != counter_events_it->second.event_weight_map.end()) { auto inc_val = event_weight_map_it->second; counter_events_it->second.docid_counts[doc_id] += inc_val; } else { LOG(ERROR) << "event_name " << event_name << " not defined in analytic rule for counter events."; } } else { LOG(ERROR) << "collection " << destination_collection << " not found in analytics rule."; } } } else { return Option<bool>(500, "Failure in adding an event."); } return Option<bool>(true); } void AnalyticsManager::add_nohits_query(const std::string &query_collection, const std::string &query, bool live_query, const std::string &user_id) { // look up 
suggestion collections for the query collection std::unique_lock lock(mutex); const auto& suggestion_collections_it = query_collection_mapping.find(query_collection); if(suggestion_collections_it != query_collection_mapping.end()) { for(const auto& suggestion_collection: suggestion_collections_it->second) { const auto& noresults_queries_it = nohits_queries.find(suggestion_collection); if(noresults_queries_it != nohits_queries.end() && noresults_queries_it->second->is_auto_aggregation_enabled()) { noresults_queries_it->second->add(query, query, live_query, user_id); } } } } void AnalyticsManager::run(ReplicationState* raft_server) { uint64_t prev_persistence_s = std::chrono::duration_cast<std::chrono::seconds>( std::chrono::system_clock::now().time_since_epoch()).count(); while(!quit) { std::unique_lock lk(mutex); cv.wait_for(lk, std::chrono::seconds(QUERY_COMPACTION_INTERVAL_S), [&] { return quit.load(); }); //LOG(INFO) << "AnalyticsManager::run"; if(quit) { lk.unlock(); break; } auto now_ts_seconds = std::chrono::duration_cast<std::chrono::seconds>( std::chrono::system_clock::now().time_since_epoch()).count(); if(now_ts_seconds - prev_persistence_s < Config::get_instance().get_analytics_flush_interval()) { // we will persist aggregation every hour // LOG(INFO) << "QuerySuggestions::run interval is less, continuing"; continue; } persist_query_events(raft_server, prev_persistence_s); persist_events(raft_server, prev_persistence_s); persist_popular_events(raft_server, prev_persistence_s); prev_persistence_s = std::chrono::duration_cast<std::chrono::seconds>( std::chrono::system_clock::now().time_since_epoch()).count(); lk.unlock(); } dispose(); } void AnalyticsManager::persist_query_events(ReplicationState *raft_server, uint64_t prev_persistence_s) { // lock is held by caller auto send_http_response = [&](QueryAnalytics* queryAnalyticsPtr, const std::string& import_payload, const std::string& suggestion_coll, const std::string& query_type) { // send http request 
std::string leader_url = raft_server->get_leader_url(); if(!leader_url.empty()) { const std::string& base_url = leader_url + "collections/" + suggestion_coll; std::string res; const std::string& update_url = base_url + "/documents/import?action=emplace"; std::map<std::string, std::string> res_headers; long status_code = HttpClient::post_response(update_url, import_payload, res, res_headers, {}, 10*1000, true); if(status_code != 200) { LOG(ERROR) << "Error while sending "<< query_type <<" events to leader. " << "Status code: " << status_code << ", response: " << res; } else { LOG(INFO) << "Query aggregation for collection: " + suggestion_coll; queryAnalyticsPtr->reset_local_counts(); if(raft_server->is_leader()) { // try to run top-K compaction of suggestion collection const std::string top_k_param = "count:" + std::to_string(queryAnalyticsPtr->get_k()); const std::string& truncate_topk_url = base_url + "/documents?top_k_by=" + top_k_param; res.clear(); res_headers.clear(); status_code = HttpClient::delete_response(truncate_topk_url, res, res_headers, 10*1000, true); if(status_code != 200) { LOG(ERROR) << "Error while running top K for " << query_type <<" suggestions collection. 
" << "Status code: " << status_code << ", response: " << res; } else { LOG(INFO) << "Top K aggregation for collection: " + suggestion_coll; } } } } }; for(const auto& suggestion_config: suggestion_configs) { const std::string& sink_name = suggestion_config.first; const std::string& suggestion_coll = suggestion_config.second.destination_collection; auto popular_queries_it = popular_queries.find(suggestion_coll); auto nohits_queries_it = nohits_queries.find(suggestion_coll); // need to prepare the counts as JSON docs for import into the suggestion collection // {"id": "432432", "q": "foo", "$operations": {"increment": {"count": 100}}} std::string import_payload; if(popular_queries_it != popular_queries.end()) { import_payload.clear(); QueryAnalytics *popularQueries = popular_queries_it->second; // aggregate prefix queries to their final form auto now = std::chrono::system_clock::now().time_since_epoch(); auto now_ts_us = std::chrono::duration_cast<std::chrono::microseconds>(now).count(); popularQueries->compact_user_queries(now_ts_us); popularQueries->serialize_as_docs(import_payload); send_http_response(popularQueries, import_payload, suggestion_coll, "popular queries"); } if(nohits_queries_it != nohits_queries.end()) { import_payload.clear(); QueryAnalytics *nohitsQueries = nohits_queries_it->second; // aggregate prefix queries to their final form auto now = std::chrono::system_clock::now().time_since_epoch(); auto now_ts_us = std::chrono::duration_cast<std::chrono::microseconds>(now).count(); nohitsQueries->compact_user_queries(now_ts_us); nohitsQueries->serialize_as_docs(import_payload); send_http_response(nohitsQueries, import_payload, suggestion_coll, "nohits queries"); } if(import_payload.empty()) { continue; } } } void AnalyticsManager::persist_events(ReplicationState *raft_server, uint64_t prev_persistence_s) { // lock is held by caller auto send_http_response = [&](const std::string& import_payload) { // send http request if(raft_server == nullptr) { 
return; } std::string leader_url = raft_server->get_leader_url(); if(!leader_url.empty()) { const std::string& base_url = leader_url + "analytics/"; std::string res; const std::string& update_url = base_url + "aggregate_events"; std::map<std::string, std::string> res_headers; long status_code = HttpClient::post_response(update_url, import_payload, res, res_headers, {}, 10*1000, true); if(status_code != 200) { LOG(ERROR) << "Error while sending "<<" log events to leader. " << "Status code: " << status_code << ", response: " << res; } } }; nlohmann::json payload = nlohmann::json::array(); for (auto &events_collection_it: query_collection_events) { const auto& collection = events_collection_it.first; for (const auto &event: events_collection_it.second) { if (event.log_to_store) { nlohmann::json event_data; event.to_json(event_data, collection); payload.push_back(event_data); } } if(!payload.empty()) { send_http_response(payload.dump()); events_collection_it.second.clear(); } } } void AnalyticsManager::persist_popular_events(ReplicationState *raft_server, uint64_t prev_persistence_s) { auto send_http_response = [&](const std::string& import_payload, const std::string& collection) { if (raft_server == nullptr) { return; } std::string leader_url = raft_server->get_leader_url(); if (!leader_url.empty()) { const std::string &base_url = leader_url + "collections/" + collection; std::string res; const std::string &update_url = base_url + "/documents/import?action=update"; std::map<std::string, std::string> res_headers; long status_code = HttpClient::post_response(update_url, import_payload, res, res_headers, {}, 10 * 1000, true); if (status_code != 200) { LOG(ERROR) << "Error while sending popular_clicks events to leader. 
" << "Status code: " << status_code << ", response: " << res; } } }; for(auto& counter_event_it : counter_events) { auto coll = counter_event_it.first; std::string docs; counter_event_it.second.serialize_as_docs(docs); send_http_response(docs, coll); counter_event_it.second.docid_counts.clear(); } } void AnalyticsManager::stop() { quit = true; dispose(); cv.notify_all(); } void AnalyticsManager::dispose() { std::unique_lock lk(mutex); for(auto& kv: popular_queries) { delete kv.second; } popular_queries.clear(); for(auto& kv: nohits_queries) { delete kv.second; } nohits_queries.clear(); suggestion_configs.clear(); query_collection_mapping.clear(); counter_events.clear(); query_collection_events.clear(); event_collection_map.clear(); events_cache.clear(); } void AnalyticsManager::init(Store* store, Store* analytics_store, uint32_t analytics_minute_rate_limit) { this->store = store; this->analytics_store = analytics_store; this->analytics_minute_rate_limit = analytics_minute_rate_limit; if(analytics_store) { events_cache.capacity(1024); } } std::unordered_map<std::string, QueryAnalytics*> AnalyticsManager::get_popular_queries() { std::unique_lock lk(mutex); return popular_queries; } std::unordered_map<std::string, QueryAnalytics*> AnalyticsManager::get_nohits_queries() { std::unique_lock lk(mutex); return nohits_queries; } std::unordered_map<std::string, counter_event_t> AnalyticsManager::get_popular_clicks() { std::unique_lock lk(mutex); return counter_events; } void AnalyticsManager::resetToggleRateLimit(bool toggle) { std::unique_lock lk(mutex); events_cache.clear(); isRateLimitEnabled = toggle; } void counter_event_t::serialize_as_docs(std::string &docs) { for (auto kv: docid_counts) { nlohmann::json doc; doc["id"] = kv.first; doc["$operations"]["increment"][counter_field] = kv.second; docs += doc.dump(-1, ' ', false, nlohmann::detail::error_handler_t::ignore) + "\n"; } if (!docs.empty()) { docs.pop_back(); } } bool AnalyticsManager::write_to_db(const 
nlohmann::json& payload) { if(analytics_store) { for(const auto& event: payload) { std::string userid = event["user_id"].get<std::string>(); std::string event_name = event["name"].get<std::string>(); std::string ts = StringUtils::serialize_uint64_t(event["timestamp"].get<uint64_t>()); std::string key = userid + "%" + event_name + "%" + ts; bool inserted = analytics_store->insert(key, event.dump()); if(!inserted) { LOG(ERROR) << "Error while dumping events to analytics db."; return false; } } } else { LOG(ERROR) << "Analytics DB not initialized!!"; return false; } return true; } void AnalyticsManager::get_last_N_events(const std::string& userid, const std::string& event_name, uint32_t N, std::vector<std::string>& values) { std::string user_id = userid; //erase any '%' in userid user_id.erase(std::remove(user_id.begin(), user_id.end(), '%'), user_id.end()); auto userid_prefix = user_id + "%"; if(event_name != "*") { userid_prefix += event_name; } analytics_store->get_last_N_values(userid_prefix, N, values); } void event_t::to_json(nlohmann::json& obj, const std::string& coll) const { obj["query"] = query; obj["type"] = event_type; obj["timestamp"] = timestamp; obj["user_id"] = user_id; obj["doc_id"] = doc_id; obj["name"] = name; obj["collection"] = coll; if(event_type == "custom") { for(const auto& kv : data) { obj[kv.first] = kv.second; } } }
33,802
C++
.cpp
692
39.060694
153
0.606694
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,791
collection.cpp
typesense_typesense/src/collection.cpp
#include "collection.h" #include <numeric> #include <chrono> #include <match_score.h> #include <string_utils.h> #include <art.h> #include <rocksdb/write_batch.h> #include <system_metrics.h> #include <tokenizer.h> #include <collection_manager.h> #include <regex> #include <list> #include <posting.h> #include <timsort.hpp> #include "validator.h" #include "topster.h" #include "logger.h" #include "thread_local_vars.h" #include "vector_query_ops.h" #include "embedder_manager.h" #include "stopwords_manager.h" #include "conversation_model.h" #include "conversation_manager.h" #include "conversation_model_manager.h" #include "field.h" #include "join.h" const std::string override_t::MATCH_EXACT = "exact"; const std::string override_t::MATCH_CONTAINS = "contains"; struct sort_fields_guard_t { std::vector<sort_by> sort_fields_std; ~sort_fields_guard_t() { for(auto& sort_by_clause: sort_fields_std) { for (auto& eval_ids: sort_by_clause.eval.eval_ids_vec) { delete [] eval_ids; } for (uint32_t i = 0; i < sort_by_clause.eval_expressions.size(); i++) { delete sort_by_clause.eval.filter_trees[i]; } delete [] sort_by_clause.eval.filter_trees; } } }; Collection::Collection(const std::string& name, const uint32_t collection_id, const uint64_t created_at, const uint32_t next_seq_id, Store *store, const std::vector<field> &fields, const std::string& default_sorting_field, const float max_memory_ratio, const std::string& fallback_field_type, const std::vector<std::string>& symbols_to_index, const std::vector<std::string>& token_separators, const bool enable_nested_fields, std::shared_ptr<VQModel> vq_model, spp::sparse_hash_map<std::string, std::string> referenced_in, const nlohmann::json& metadata, spp::sparse_hash_map<std::string, std::set<reference_pair_t>> async_referenced_ins) : name(name), collection_id(collection_id), created_at(created_at), next_seq_id(next_seq_id), store(store), fields(fields), default_sorting_field(default_sorting_field), 
enable_nested_fields(enable_nested_fields), max_memory_ratio(max_memory_ratio), fallback_field_type(fallback_field_type), dynamic_fields({}), symbols_to_index(to_char_array(symbols_to_index)), token_separators(to_char_array(token_separators)), index(init_index()), vq_model(vq_model), referenced_in(std::move(referenced_in)), metadata(metadata), async_referenced_ins(std::move(async_referenced_ins)) { if (vq_model) { vq_model->inc_collection_ref_count(); } this->num_documents = 0; } Collection::~Collection() { std::unique_lock lifecycle_lock(lifecycle_mutex); std::unique_lock lock(mutex); delete index; delete synonym_index; if (vq_model) { vq_model->dec_collection_ref_count(); if (vq_model->get_collection_ref_count() == 0) { LOG(INFO) << "Unloading voice query model " << vq_model->get_model_name(); VQModelManager::get_instance().delete_model(vq_model->get_model_name()); } } } uint32_t Collection::get_next_seq_id() { std::shared_lock lock(mutex); store->increment(get_next_seq_id_key(name), 1); return next_seq_id++; } inline std::string get_field_value(const nlohmann::json& doc, const std::string& field_name) { return doc[field_name].is_number_integer() ? std::to_string(doc[field_name].get<int64_t>()) : doc[field_name].is_string() ? doc[field_name].get<std::string>() : doc[field_name].dump(); } inline std::string get_array_field_value(const nlohmann::json& doc, const std::string& field_name, const size_t& index) { return doc[field_name][index].is_number_integer() ? std::to_string(doc[field_name][index].get<int64_t>()) : doc[field_name][index].is_string() ? doc[field_name][index].get<std::string>() : doc[field_name][index].dump(); } Option<bool> Collection::update_async_references_with_lock(const std::string& ref_coll_name, const std::string& filter, const std::set<std::string>& filter_values, const uint32_t ref_seq_id, const std::string& field_name) { // Update reference helper field of the docs matching the filter. 
filter_result_t filter_result; get_filter_ids(filter, filter_result, false); if (filter_result.count == 0) { return Option<bool>(true); } field field; { std::shared_lock lock(mutex); auto it = search_schema.find(field_name); if (it == search_schema.end()) { return Option<bool>(400, "Could not find field `" + field_name + "` in the schema."); } field = it.value(); } std::vector<std::string> buffer; buffer.reserve(filter_result.count); for (uint32_t i = 0; i < filter_result.count; i++) { auto const& seq_id = filter_result.docs[i]; nlohmann::json existing_document; auto get_doc_op = get_document_from_store(get_seq_id_key(seq_id), existing_document); if (!get_doc_op.ok()) { if (get_doc_op.code() == 404) { LOG(ERROR) << "`" << name << "` collection: Sequence ID `" << seq_id << "` exists, but document is missing."; continue; } LOG(ERROR) << "`" << name << "` collection: " << get_doc_op.error(); continue; } auto const id = existing_document["id"].get<std::string>(); auto const reference_helper_field_name = field_name + fields::REFERENCE_HELPER_FIELD_SUFFIX; if (field.is_singular()) { // Referenced value must be unique. if (existing_document.contains(reference_helper_field_name) && existing_document[reference_helper_field_name].is_number_integer()) { const int64_t existing_ref_seq_id = existing_document[reference_helper_field_name].get<int64_t>(); if (existing_ref_seq_id != Index::reference_helper_sentinel_value && existing_ref_seq_id != ref_seq_id) { return Option<bool>(400, "Document `id: " + id + "` already has a reference to document `" += std::to_string(existing_ref_seq_id) + "` of `" += ref_coll_name + "` collection, having reference value `" += get_field_value(existing_document, field_name) + "`."); } else if (existing_ref_seq_id == ref_seq_id) { continue; } } // Set reference helper field of all the docs that matched filter to `ref_seq_id`. 
nlohmann::json update_document; update_document["id"] = id; update_document[field_name] = existing_document[field_name]; update_document[reference_helper_field_name] = ref_seq_id; buffer.push_back(update_document.dump()); } else { if (!existing_document.contains(field_name) || !existing_document[field_name].is_array()) { return Option<bool>(400, "Expected document `id: " + id + "` to have `" += field_name + "` array field " "that is `" += get_field_value(existing_document, field_name) + "` instead."); } else if (!existing_document.contains(reference_helper_field_name) || !existing_document[reference_helper_field_name].is_array()) { return Option<bool>(400, "Expected document `id: " + id + "` to have `" += reference_helper_field_name + "` array field that is `" += get_field_value(existing_document, field_name) + "` instead."); } else if (existing_document[field_name].size() != existing_document[reference_helper_field_name].size()) { return Option<bool>(400, "Expected document `id: " + id + "` to have equal count of elements in `" += field_name + ": " += get_field_value(existing_document, field_name) + "` field and `" += reference_helper_field_name + ": " += get_field_value(existing_document, reference_helper_field_name) + "` field."); } nlohmann::json update_document; update_document["id"] = id; update_document[field_name] = existing_document[field_name]; update_document[reference_helper_field_name] = existing_document[reference_helper_field_name]; auto should_update = false; for (uint32_t j = 0; j < existing_document[field_name].size(); j++) { auto const& ref_value = get_array_field_value(existing_document, field_name, j); if (filter_values.count(ref_value) == 0) { continue; } const int64_t existing_ref_seq_id = existing_document[reference_helper_field_name][j].get<int64_t>(); if (existing_ref_seq_id != Index::reference_helper_sentinel_value && existing_ref_seq_id != ref_seq_id) { return Option<bool>(400, "Document `id: " + id + "` at `" += field_name + "` 
reference array field and index `" + std::to_string(j) + "` already has a reference to document `" += std::to_string(existing_ref_seq_id) + "` of `" += ref_coll_name + "` collection, having reference value `" += get_array_field_value(existing_document, field_name, j) + "`."); } else if (existing_ref_seq_id == ref_seq_id) { continue; } should_update = true; // Set reference helper field to `ref_seq_id` at the index corresponding to where reference field has value. update_document[reference_helper_field_name][j] = ref_seq_id; } if (should_update) { buffer.push_back(update_document.dump()); } } } nlohmann::json dummy; add_many(buffer, dummy, index_operation_t::UPDATE); return Option<bool>(true); } Option<doc_seq_id_t> Collection::to_doc(const std::string & json_str, nlohmann::json& document, const index_operation_t& operation, const DIRTY_VALUES dirty_values, const std::string& id) { try { document = nlohmann::json::parse(json_str); } catch(const std::exception& e) { LOG(ERROR) << "JSON error: " << e.what(); return Option<doc_seq_id_t>(400, std::string("Bad JSON: ") + e.what()); } if(!document.is_object()) { return Option<doc_seq_id_t>(400, "Bad JSON: not a properly formed document."); } if(document.count("id") != 0 && id != "" && document["id"] != id) { return Option<doc_seq_id_t>(400, "The `id` of the resource does not match the `id` in the JSON body."); } if(document.count("id") == 0 && !id.empty()) { // use the explicit ID (usually from a PUT request) if document body does not have it document["id"] = id; } if(document.count("id") != 0 && document["id"] == "") { return Option<doc_seq_id_t>(400, "The `id` should not be empty."); } if(document.count("id") == 0) { if(operation == UPDATE) { return Option<doc_seq_id_t>(400, "For update, the `id` key must be provided."); } // for UPSERT, EMPLACE or CREATE, if a document does not have an ID, we will treat it as a new doc uint32_t seq_id = get_next_seq_id(); document["id"] = std::to_string(seq_id); return 
Option<doc_seq_id_t>(doc_seq_id_t{seq_id, true}); } else { if(!document["id"].is_string()) { return Option<doc_seq_id_t>(400, "Document's `id` field should be a string."); } const std::string& doc_id = document["id"]; // try to get the corresponding sequence id from disk if present std::string seq_id_str; StoreStatus seq_id_status = store->get(get_doc_id_key(doc_id), seq_id_str); if(seq_id_status == StoreStatus::ERROR) { return Option<doc_seq_id_t>(500, "Error fetching the sequence key for document with id: " + doc_id); } if(seq_id_status == StoreStatus::FOUND) { if(operation == CREATE) { return Option<doc_seq_id_t>(409, std::string("A document with id ") + doc_id + " already exists."); } // UPSERT, EMPLACE or UPDATE uint32_t seq_id = (uint32_t) std::stoul(seq_id_str); return Option<doc_seq_id_t>(doc_seq_id_t{seq_id, false}); } else { if(operation == UPDATE) { // for UPDATE, a document with given ID must be found return Option<doc_seq_id_t>(404, "Could not find a document with id: " + doc_id); } else { // for UPSERT, EMPLACE or CREATE, if a document with given ID is not found, we will treat it as a new doc uint32_t seq_id = get_next_seq_id(); return Option<doc_seq_id_t>(doc_seq_id_t{seq_id, true}); } } } } nlohmann::json Collection::get_summary_json() const { std::shared_lock lock(mutex); nlohmann::json json_response; json_response["name"] = name; json_response["num_documents"] = num_documents.load(); json_response["created_at"] = created_at.load(); json_response["enable_nested_fields"] = enable_nested_fields; json_response["token_separators"] = nlohmann::json::array(); json_response["symbols_to_index"] = nlohmann::json::array(); for(auto c: symbols_to_index) { json_response["symbols_to_index"].push_back(std::string(1, c)); } for(auto c: token_separators) { json_response["token_separators"].push_back(std::string(1, c)); } nlohmann::json fields_arr; const std::regex sequence_id_pattern(".*_sequence_id$"); for(const field & coll_field: fields) { if 
(std::regex_match(coll_field.name, sequence_id_pattern)) { // Don't add foo_sequence_id field. continue; } nlohmann::json field_json; field_json[fields::name] = coll_field.name; field_json[fields::type] = coll_field.type; field_json[fields::facet] = coll_field.facet; field_json[fields::optional] = coll_field.optional; field_json[fields::index] = coll_field.index; field_json[fields::sort] = coll_field.sort; field_json[fields::infix] = coll_field.infix; field_json[fields::locale] = coll_field.locale; field_json[fields::stem] = coll_field.stem; field_json[fields::store] = coll_field.store; if(coll_field.range_index) { field_json[fields::range_index] = coll_field.range_index; } // no need to sned hnsw_params for text fields if(coll_field.num_dim > 0) { field_json[fields::hnsw_params] = coll_field.hnsw_params; } if(coll_field.embed.count(fields::from) != 0) { field_json[fields::embed] = coll_field.embed; if(field_json[fields::embed].count(fields::model_config) != 0) { hide_credential(field_json[fields::embed][fields::model_config], "api_key"); hide_credential(field_json[fields::embed][fields::model_config], "access_token"); hide_credential(field_json[fields::embed][fields::model_config], "refresh_token"); hide_credential(field_json[fields::embed][fields::model_config], "client_id"); hide_credential(field_json[fields::embed][fields::model_config], "client_secret"); hide_credential(field_json[fields::embed][fields::model_config], "project_id"); } } if(coll_field.num_dim > 0) { field_json[fields::num_dim] = coll_field.num_dim; field_json[fields::vec_dist] = magic_enum::enum_name(coll_field.vec_dist); } if (!coll_field.reference.empty()) { field_json[fields::reference] = coll_field.reference; field_json[fields::async_reference] = coll_field.is_async_reference; } fields_arr.push_back(field_json); } json_response["fields"] = fields_arr; json_response["default_sorting_field"] = default_sorting_field; if(!metadata.empty()) { json_response["metadata"] = metadata; } if(vq_model) 
{ json_response["voice_query_model"] = nlohmann::json::object(); json_response["voice_query_model"]["model_name"] = vq_model->get_model_name(); } return json_response; } Option<nlohmann::json> Collection::add(const std::string & json_str, const index_operation_t& operation, const std::string& id, const DIRTY_VALUES& dirty_values) { nlohmann::json document; std::vector<std::string> json_lines = {json_str}; const nlohmann::json& res = add_many(json_lines, document, operation, id, dirty_values, false, false); if(!res["success"].get<bool>()) { nlohmann::json res_doc; try { res_doc = nlohmann::json::parse(json_lines[0]); } catch(const std::exception& e) { LOG(ERROR) << "JSON error: " << e.what(); return Option<nlohmann::json>(400, std::string("Bad JSON: ") + e.what()); } return Option<nlohmann::json>(res_doc["code"].get<size_t>(), res_doc["error"].get<std::string>()); } return Option<nlohmann::json>(document); } bool Collection::check_and_add_nested_field(tsl::htrie_map<char, field>& nested_fields, const field& nested_field) { // if field is an object or object_array field, we have to remove matching children if(nested_field.is_object()) { auto it = nested_fields.equal_prefix_range(nested_field.name + "."); if(it.first != it.second) { // children exist, and they should be removed std::vector<std::string> child_fields; for(auto child_field = it.first; child_field != it.second; child_field++) { child_fields.push_back(child_field.key()); } for(const auto& child_field: child_fields) { nested_fields.erase(child_field); } } } // we will only add a child if none of the parent already exists std::vector<std::string> name_parts; StringUtils::split(nested_field.name, name_parts, "."); if(name_parts.size() == 1) { // dot not found if(nested_fields.find(nested_field.name) == nested_fields.end()) { nested_fields[nested_field.name] = nested_field; return true; } return false; } std::string parent_path; for(size_t i = 0; i < name_parts.size(); i++) { if (!parent_path.empty()) { 
parent_path += "."; } parent_path += name_parts[i]; if (nested_fields.find(parent_path) != nested_fields.end()) { // parent found, so we will not add this field return false; } } // emplace only if no parent path is found nested_fields[nested_field.name] = nested_field; return true; } nlohmann::json Collection::add_many(std::vector<std::string>& json_lines, nlohmann::json& document, const index_operation_t& operation, const std::string& id, const DIRTY_VALUES& dirty_values, const bool& return_doc, const bool& return_id, const size_t remote_embedding_batch_size, const size_t remote_embedding_timeout_ms, const size_t remote_embedding_num_tries) { //LOG(INFO) << "Memory ratio. Max = " << max_memory_ratio << ", Used = " << SystemMetrics::used_memory_ratio(); std::vector<index_record> index_records; const size_t index_batch_size = 1000; size_t num_indexed = 0; //bool exceeds_memory_limit = false; // ensures that document IDs are not repeated within the same batch std::set<std::string> batch_doc_ids; bool found_batch_new_field = false; for(size_t i=0; i < json_lines.size(); i++) { const std::string & json_line = json_lines[i]; Option<doc_seq_id_t> doc_seq_id_op = to_doc(json_line, document, operation, dirty_values, id); const uint32_t seq_id = doc_seq_id_op.ok() ? 
doc_seq_id_op.get().seq_id : 0; index_record record(i, seq_id, document, operation, dirty_values); // NOTE: we overwrite the input json_lines with result to avoid memory pressure record.is_update = false; bool repeated_doc = false; std::vector<field> new_fields; if(!doc_seq_id_op.ok()) { record.index_failure(doc_seq_id_op.code(), doc_seq_id_op.error()); } else { const std::string& doc_id = record.doc["id"].get<std::string>(); repeated_doc = (batch_doc_ids.find(doc_id) != batch_doc_ids.end()); if(repeated_doc) { // when a document repeats, we send the batch until this document so that we can deal with conflicts i--; goto do_batched_index; } record.is_update = !doc_seq_id_op.get().is_new; if(record.is_update) { get_document_from_store(get_seq_id_key(seq_id), record.old_doc); } batch_doc_ids.insert(doc_id); std::shared_lock lock(mutex); // if `fallback_field_type` or `dynamic_fields` is enabled, update schema first before indexing if(!fallback_field_type.empty() || !dynamic_fields.empty() || !nested_fields.empty() || !reference_fields.empty() || !async_referenced_ins.empty()) { Option<bool> new_fields_op = detect_new_fields(record.doc, dirty_values, search_schema, dynamic_fields, nested_fields, fallback_field_type, record.is_update, new_fields, enable_nested_fields, reference_fields, object_reference_helper_fields); if(!new_fields_op.ok()) { record.index_failure(new_fields_op.code(), new_fields_op.error()); } } } if(!new_fields.empty()) { std::unique_lock lock(mutex); bool found_new_field = false; for(auto& new_field: new_fields) { if(search_schema.find(new_field.name) == search_schema.end()) { found_new_field = true; found_batch_new_field = true; search_schema.emplace(new_field.name, new_field); fields.emplace_back(new_field); if(new_field.nested) { check_and_add_nested_field(nested_fields, new_field); } } } if(found_new_field) { index->refresh_schemas(new_fields, {}); } } index_records.emplace_back(std::move(record)); do_batched_index: if((i+1) % index_batch_size == 
0 || i == json_lines.size()-1 || repeated_doc) { batch_index(index_records, json_lines, num_indexed, return_doc, return_id, remote_embedding_batch_size, remote_embedding_timeout_ms, remote_embedding_num_tries); if(found_batch_new_field) { persist_collection_meta(); } // to return the document for the single doc add cases if(index_records.size() == 1) { const auto& rec = index_records[0]; document = rec.is_update ? rec.new_doc : rec.doc; remove_flat_fields(document); remove_reference_helper_fields(document); } index_records.clear(); batch_doc_ids.clear(); } } nlohmann::json resp_summary; resp_summary["num_imported"] = num_indexed; resp_summary["success"] = (num_indexed == json_lines.size()); return resp_summary; } Option<nlohmann::json> Collection::update_matching_filter(const std::string& filter_query, const std::string & json_str, std::string& req_dirty_values, const int batch_size) { auto _filter_query = filter_query; StringUtils::trim(_filter_query); if (_filter_query.empty()) { nlohmann::json resp_summary; resp_summary["num_updated"] = 0; return Option(resp_summary); } const auto& dirty_values = parse_dirty_values_option(req_dirty_values); size_t docs_updated_count = 0; nlohmann::json update_document, dummy; try { update_document = nlohmann::json::parse(json_str); } catch(const std::exception& e) { LOG(ERROR) << "JSON error: " << e.what(); return Option<nlohmann::json>(400, std::string("Bad JSON: ") + e.what()); } std::vector<std::string> buffer; buffer.reserve(batch_size); if (_filter_query == "*") { // Get an iterator from rocksdb and iterate over all the documents present in the collection. 
std::string iter_upper_bound_key = get_seq_id_collection_prefix() + "`"; auto iter_upper_bound = new rocksdb::Slice(iter_upper_bound_key); CollectionManager & collectionManager = CollectionManager::get_instance(); const std::string seq_id_prefix = get_seq_id_collection_prefix(); rocksdb::Iterator* it = collectionManager.get_store()->scan(seq_id_prefix, iter_upper_bound); while(it->Valid()) { // Generate a batch of documents to be ingested by add_many. for (int buffer_counter = 0; buffer_counter < batch_size && it->Valid();) { auto json_doc_str = it->value().ToString(); it->Next(); nlohmann::json existing_document; try { existing_document = nlohmann::json::parse(json_doc_str); } catch(...) { continue; // Don't add into buffer. } update_document["id"] = existing_document["id"].get<std::string>(); buffer.push_back(update_document.dump()); buffer_counter++; } auto res = add_many(buffer, dummy, index_operation_t::UPDATE, "", dirty_values); docs_updated_count += res["num_imported"].get<size_t>(); buffer.clear(); } delete iter_upper_bound; delete it; } else { filter_result_t filter_result; auto filter_ids_op = get_filter_ids(_filter_query, filter_result, false); if(!filter_ids_op.ok()) { return Option<nlohmann::json>(filter_ids_op.code(), filter_ids_op.error()); } for (size_t i = 0; i < filter_result.count;) { for (int buffer_counter = 0; buffer_counter < batch_size && i < filter_result.count;) { uint32_t seq_id = filter_result.docs[i++]; nlohmann::json existing_document; auto get_doc_op = get_document_from_store(get_seq_id_key(seq_id), existing_document); if (!get_doc_op.ok()) { continue; } update_document["id"] = existing_document["id"].get<std::string>(); buffer.push_back(update_document.dump()); buffer_counter++; } auto res = add_many(buffer, dummy, index_operation_t::UPDATE, "", dirty_values); docs_updated_count += res["num_imported"].get<size_t>(); buffer.clear(); } } nlohmann::json resp_summary; resp_summary["num_updated"] = docs_updated_count; return 
Option(resp_summary); } bool Collection::is_exceeding_memory_threshold() const { return SystemMetrics::used_memory_ratio() > max_memory_ratio; } void Collection::batch_index(std::vector<index_record>& index_records, std::vector<std::string>& json_out, size_t &num_indexed, const bool& return_doc, const bool& return_id, const size_t remote_embedding_batch_size, const size_t remote_embedding_timeout_ms, const size_t remote_embedding_num_tries) { batch_index_in_memory(index_records, remote_embedding_batch_size, remote_embedding_timeout_ms, remote_embedding_num_tries, true); // store only documents that were indexed in-memory successfully for(auto& index_record: index_records) { nlohmann::json res; if(index_record.indexed.ok()) { if(index_record.is_update) { remove_flat_fields(index_record.new_doc); for(auto& field: fields) { if(!field.store) { index_record.new_doc.erase(field.name); } } const std::string& serialized_json = index_record.new_doc.dump(-1, ' ', false, nlohmann::detail::error_handler_t::ignore); bool write_ok = store->insert(get_seq_id_key(index_record.seq_id), serialized_json); if(!write_ok) { // we will attempt to reindex the old doc on a best-effort basis LOG(ERROR) << "Update to disk failed. 
Will restore old document"; remove_document(index_record.new_doc, index_record.seq_id, false); index_in_memory(index_record.old_doc, index_record.seq_id, index_record.operation, index_record.dirty_values); index_record.index_failure(500, "Could not write to on-disk storage."); } else { num_indexed++; index_record.index_success(); } } else { // remove flattened field values before storing on disk remove_flat_fields(index_record.doc); for(auto& field: fields) { if(!field.store) { index_record.doc.erase(field.name); } } const std::string& seq_id_str = std::to_string(index_record.seq_id); const std::string& serialized_json = index_record.doc.dump(-1, ' ', false, nlohmann::detail::error_handler_t::ignore); rocksdb::WriteBatch batch; batch.Put(get_doc_id_key(index_record.doc["id"]), seq_id_str); batch.Put(get_seq_id_key(index_record.seq_id), serialized_json); bool write_ok = store->batch_write(batch); if(!write_ok) { // remove from in-memory store to keep the state synced LOG(ERROR) << "Write to disk failed. Will restore old document"; remove_document(index_record.doc, index_record.seq_id, false); index_record.index_failure(500, "Could not write to on-disk storage."); } else { num_indexed++; index_record.index_success(); } } res["success"] = index_record.indexed.ok(); if (return_doc & index_record.indexed.ok()) { res["document"] = index_record.is_update ? index_record.new_doc : index_record.doc; } if (return_id & index_record.indexed.ok()) { res["id"] = index_record.is_update ? 
index_record.new_doc["id"] : index_record.doc["id"]; } if(!index_record.indexed.ok()) { if(return_doc) { res["document"] = json_out[index_record.position]; } res["error"] = index_record.indexed.error(); if (!index_record.embedding_res.empty()) { res["embedding_error"] = nlohmann::json::object(); res["embedding_error"] = index_record.embedding_res; res["error"] = index_record.embedding_res["error"]; } res["code"] = index_record.indexed.code(); } } else { res["success"] = false; res["error"] = index_record.indexed.error(); res["code"] = index_record.indexed.code(); if(return_doc) { res["document"] = json_out[index_record.position]; } if (return_id && index_record.doc.contains("id")) { res["id"] = index_record.doc["id"]; } if (!index_record.embedding_res.empty()) { res["embedding_error"] = nlohmann::json::object(); res["error"] = index_record.embedding_res["error"]; res["embedding_error"] = index_record.embedding_res; } } json_out[index_record.position] = res.dump(-1, ' ', false, nlohmann::detail::error_handler_t::ignore); } } Option<uint32_t> Collection::index_in_memory(nlohmann::json &document, uint32_t seq_id, const index_operation_t op, const DIRTY_VALUES& dirty_values) { std::unique_lock lock(mutex); Option<uint32_t> validation_op = validator_t::validate_index_in_memory(document, seq_id, default_sorting_field, search_schema, embedding_fields, op, false, fallback_field_type, dirty_values); if(!validation_op.ok()) { return validation_op; } index_record rec(0, seq_id, document, op, dirty_values); std::vector<index_record> index_batch; index_batch.emplace_back(std::move(rec)); Index::batch_memory_index(index, index_batch, default_sorting_field, search_schema, embedding_fields, fallback_field_type, token_separators, symbols_to_index, true); num_documents += 1; return Option<>(200); } size_t Collection::batch_index_in_memory(std::vector<index_record>& index_records, const size_t remote_embedding_batch_size, const size_t remote_embedding_timeout_ms, const size_t 
remote_embedding_num_tries, const bool generate_embeddings) { std::unique_lock lock(mutex); size_t num_indexed = Index::batch_memory_index(index, index_records, default_sorting_field, search_schema, embedding_fields, fallback_field_type, token_separators, symbols_to_index, true, remote_embedding_batch_size, remote_embedding_timeout_ms, remote_embedding_num_tries,generate_embeddings, false, tsl::htrie_map<char, field>(), name, async_referenced_ins); num_documents += num_indexed; return num_indexed; } bool Collection::does_override_match(const override_t& override, std::string& query, std::set<uint32_t>& excluded_set, string& actual_query, const string& filter_query, bool already_segmented, const bool tags_matched, const bool wildcard_tag_matched, const std::map<size_t, std::vector<std::string>>& pinned_hits, const std::vector<std::string>& hidden_hits, std::vector<std::pair<uint32_t, uint32_t>>& included_ids, std::vector<uint32_t>& excluded_ids, std::vector<const override_t*>& filter_overrides, bool& filter_curated_hits, std::string& curated_sort_by, nlohmann::json& override_metadata) const { if(!wildcard_tag_matched && !tags_matched && !override.rule.tags.empty()) { // only untagged overrides must be considered when no tags are given in the query return false; } auto now_epoch = int64_t(std::time(0)); if(override.effective_from_ts != -1 && now_epoch < override.effective_from_ts) { return false; } if(override.effective_to_ts != -1 && now_epoch > override.effective_to_ts) { return false; } // ID-based overrides are applied first as they take precedence over filter-based overrides if(!override.filter_by.empty()) { filter_overrides.push_back(&override); } if((wildcard_tag_matched || tags_matched) && override.rule.query.empty() && override.rule.filter_by.empty()) { // allowed } else { bool filter_by_match = (override.rule.query.empty() && override.rule.match.empty() && !override.rule.filter_by.empty() && override.rule.filter_by == filter_query); bool query_match = 
(override.rule.match == override_t::MATCH_EXACT && override.rule.normalized_query == query) ||
            (override.rule.match == override_t::MATCH_CONTAINS &&
             StringUtils::contains_word(query, override.rule.normalized_query));

    if(!filter_by_match && !query_match) {
        return false;
    }

    // a non-empty rule filter must match the request's filter exactly
    if(!override.rule.filter_by.empty() && override.rule.filter_by != filter_query) {
        return false;
    }
    }

    // have to ensure that dropped hits take precedence over added hits
    for(const auto & hit: override.drop_hits) {
        Option<uint32_t> seq_id_op = doc_id_to_seq_id(hit.doc_id);
        if(seq_id_op.ok()) {
            excluded_ids.push_back(seq_id_op.get());
            excluded_set.insert(seq_id_op.get());
        }
    }

    // add hits are skipped when the same doc was already dropped above
    for(const auto & hit: override.add_hits) {
        Option<uint32_t> seq_id_op = doc_id_to_seq_id(hit.doc_id);
        if(!seq_id_op.ok()) {
            continue;
        }
        uint32_t seq_id = seq_id_op.get();
        bool excluded = (excluded_set.count(seq_id) != 0);
        if(!excluded) {
            included_ids.emplace_back(seq_id, hit.position);
        }
    }

    if(!override.replace_query.empty()) {
        actual_query = override.replace_query;
    } else if(override.remove_matched_tokens && override.filter_by.empty()) {
        // don't prematurely remove tokens from query because dynamic filtering will require them
        StringUtils::replace_all(query, override.rule.normalized_query, "");
        StringUtils::trim(query);
        if(query.empty()) {
            // all tokens were consumed by the override rule: fall back to a wildcard query
            query = "*";
        }
        actual_query = query;
    }

    filter_curated_hits = override.filter_curated_hits;
    curated_sort_by = override.sort_by;
    if(override_metadata.empty()) {
        override_metadata = override.metadata;
    }
    return true;
}

// Applies curation to a search request: hidden hits become exclusions, matching
// overrides may rewrite the query / add filters / pin documents, and explicitly
// pinned hits are added last (skipping anything already excluded).
// Out-params: included_ids (seq_id, position), excluded_ids, filter_overrides,
// filter_curated_hits, curated_sort_by, override_metadata.
void Collection::curate_results(string& actual_query, const string& filter_query, bool enable_overrides,
                                bool already_segmented, const std::set<std::string>& tags,
                                const std::map<size_t, std::vector<std::string>>& pinned_hits,
                                const std::vector<std::string>& hidden_hits,
                                std::vector<std::pair<uint32_t, uint32_t>>& included_ids,
                                std::vector<uint32_t>& excluded_ids,
                                std::vector<const override_t*>& filter_overrides,
                                bool& filter_curated_hits,
                                std::string& curated_sort_by, nlohmann::json& override_metadata) const {
    std::set<uint32_t> excluded_set;

    // If pinned or hidden hits are provided, they take precedence over overrides
    // have to ensure that hidden hits take precedence over included hits
    if(!hidden_hits.empty()) {
        for(const auto & hit: hidden_hits) {
            Option<uint32_t> seq_id_op = doc_id_to_seq_id(hit);
            if(seq_id_op.ok()) {
                excluded_ids.push_back(seq_id_op.get());
                excluded_set.insert(seq_id_op.get());
            }
        }
    }

    if(enable_overrides && !overrides.empty()) {
        // normalize the query (tokenize + re-join) so override rules match consistently
        std::string query;
        if(actual_query == "*") {
            query = "*";
        } else {
            std::vector<std::string> tokens;
            Tokenizer tokenizer(actual_query, true, false, "", symbols_to_index, token_separators);
            tokenizer.tokenize(tokens);
            query = StringUtils::join(tokens, " ");
        }

        if(!tags.empty()) {
            bool all_tags_found = false;
            std::set<std::string> found_overrides;

            if(tags.size() > 1) {
                // check for AND match only when multiple tags are sent
                const auto& tag = *tags.begin();
                auto override_ids_it = override_tags.find(tag);
                if (override_ids_it != override_tags.end()) {
                    const auto &override_ids = override_ids_it->second;
                    for(const auto& id: override_ids) {
                        auto override_it = overrides.find(id);
                        if(override_it == overrides.end()) {
                            continue;
                        }
                        const auto& override = override_it->second;
                        // AND semantics: the override's tag set must equal the request's tag set
                        if(override.rule.tags == tags) {
                            bool match_found = does_override_match(override, query, excluded_set, actual_query,
                                                                   filter_query, already_segmented, true, false,
                                                                   pinned_hits, hidden_hits, included_ids,
                                                                   excluded_ids, filter_overrides, filter_curated_hits,
                                                                   curated_sort_by, override_metadata);
                            if(match_found) {
                                all_tags_found = true;
                                found_overrides.insert(id);
                                if(override.stop_processing) {
                                    break;
                                }
                            }
                        }
                    }
                }
            }

            if(!all_tags_found) {
                // check for partial tag matches
                for(const auto& tag: tags) {
                    auto override_ids_it = override_tags.find(tag);
                    if (override_ids_it == override_tags.end()) {
                        continue;
                    }
                    const auto &override_ids = override_ids_it->second;
                    for(const auto& id: override_ids) {
                        if(found_overrides.count(id) != 0) {
                            // already applied via the AND-match pass above
                            continue;
                        }
                        auto override_it = overrides.find(id);
                        if (override_it == overrides.end()) {
                            continue;
                        }
                        const auto& override = override_it->second;
                        // override qualifies if it shares at least one tag with the request
                        std::set<std::string> matching_tags;
                        std::set_intersection(override.rule.tags.begin(), override.rule.tags.end(),
                                              tags.begin(), tags.end(),
                                              std::inserter(matching_tags, matching_tags.begin()));
                        if(matching_tags.empty()) {
                            continue;
                        }
                        bool match_found = does_override_match(override, query, excluded_set, actual_query,
                                                               filter_query, already_segmented, true, false,
                                                               pinned_hits, hidden_hits, included_ids,
                                                               excluded_ids, filter_overrides, filter_curated_hits,
                                                               curated_sort_by, override_metadata);
                        if(match_found) {
                            found_overrides.insert(id);
                            if(override.stop_processing) {
                                break;
                            }
                        }
                    }
                }
            }
        } else {
            // no override tags given
            for(const auto& override_kv: overrides) {
                const auto& override = override_kv.second;
                // an override tagged only "*" matches even when the request sends no tags
                bool wildcard_tag = override.rule.tags.size() == 1 && *override.rule.tags.begin() == "*";
                bool match_found = does_override_match(override, query, excluded_set, actual_query,
                                                       filter_query, already_segmented, false, wildcard_tag,
                                                       pinned_hits, hidden_hits, included_ids,
                                                       excluded_ids, filter_overrides, filter_curated_hits,
                                                       curated_sort_by, override_metadata);
                if(match_found && override.stop_processing) {
                    break;
                }
            }
        }
    }

    if(!pinned_hits.empty()) {
        for(const auto& pos_ids: pinned_hits) {
            size_t pos = pos_ids.first;
            for(const std::string& id: pos_ids.second) {
                Option<uint32_t> seq_id_op = doc_id_to_seq_id(id);
                if(!seq_id_op.ok()) {
                    // silently skip unknown doc ids in pinned hits
                    continue;
                }
                uint32_t seq_id = seq_id_op.get();
                bool excluded = (excluded_set.count(seq_id) != 0);
                if(!excluded) {
                    included_ids.emplace_back(seq_id, pos);
                }
            }
        }
    }
}

// Thin locking wrapper: acquires a shared (read) lock on the collection and
// delegates to validate_and_standardize_sort_fields() with is_reference_sort=true.
Option<bool> Collection::validate_and_standardize_sort_fields_with_lock(const std::vector<sort_by> & sort_fields,
                                                                        std::vector<sort_by>& sort_fields_std,
                                                                        const bool is_wildcard_query,
                                                                        const bool is_vector_query,
                                                                        const std::string& query,
                                                                        const bool is_group_by_query,
                                                                        const size_t remote_embedding_timeout_ms,
                                                                        const size_t remote_embedding_num_tries) const {
std::shared_lock lock(mutex);
    return validate_and_standardize_sort_fields(sort_fields, sort_fields_std, is_wildcard_query, is_vector_query,
                                                query, is_group_by_query, remote_embedding_timeout_ms,
                                                remote_embedding_num_tries, true);
}

// Validates the raw sort_by clauses and expands them into standardized entries
// (sort_fields_std): resolves reference-collection sorts, _eval expressions,
// _rand(), _text_match(buckets:N), _vector_query(...), decay-function params,
// geopoint params, and finally appends default sort fields (text match /
// vector distance / default sorting field) unless this is a reference sort.
// Returns a 4xx Option on any malformed clause.
Option<bool> Collection::validate_and_standardize_sort_fields(const std::vector<sort_by> & sort_fields,
                                                              std::vector<sort_by>& sort_fields_std,
                                                              const bool is_wildcard_query,
                                                              const bool is_vector_query,
                                                              const std::string& query, const bool is_group_by_query,
                                                              const size_t remote_embedding_timeout_ms,
                                                              const size_t remote_embedding_num_tries,
                                                              const bool is_reference_sort) const {
    uint32_t eval_sort_count = 0;
    size_t num_sort_expressions = 0;  // NOTE(review): appears unused in this function — candidate for removal

    for(size_t i = 0; i < sort_fields.size(); i++) {
        const sort_by& _sort_field = sort_fields[i];

        if (_sort_field.name[0] == '$') { // Reference sort_by, e.g. $OtherColl(field:asc)
            auto parenthesis_index = _sort_field.name.find('(');
            std::string ref_collection_name = _sort_field.name.substr(1, parenthesis_index - 1);
            auto& cm = CollectionManager::get_instance();
            auto ref_collection = cm.get_collection(ref_collection_name);
            if (ref_collection == nullptr) {
                return Option<bool>(400, "Referenced collection `" + ref_collection_name + "` in `sort_by` not found.");
            }
            // `CollectionManager::get_collection` accounts for collection alias being used and provides pointer to the
            // original collection.
            ref_collection_name = ref_collection->name;

            // inner sort_by string between the parentheses
            auto sort_by_str = _sort_field.name.substr(parenthesis_index + 1,
                                                       _sort_field.name.size() - parenthesis_index - 2);
            std::vector<sort_by> ref_sort_fields;
            bool parsed_sort_by = CollectionManager::parse_sort_by_str(sort_by_str, ref_sort_fields);
            if (!parsed_sort_by) {
                return Option<bool>(400, "Reference `sort_by` is malformed.");
            }

            std::vector<sort_by> ref_sort_fields_std;
            auto sort_validation_op = ref_collection->validate_and_standardize_sort_fields_with_lock(
                                                        ref_sort_fields, ref_sort_fields_std,
                                                        is_wildcard_query, is_vector_query, query, is_group_by_query,
                                                        remote_embedding_timeout_ms, remote_embedding_num_tries);

            std::vector<std::string> nested_join_coll_names;
            for (auto const& coll_name: _sort_field.nested_join_collection_names) {
                auto coll = cm.get_collection(coll_name);
                if (coll == nullptr) {
                    return Option<bool>(400, "Referenced collection `" + coll_name + "` in `sort_by` not found.");
                }
                // `CollectionManager::get_collection` accounts for collection alias being used and provides pointer to the
                // original collection.
                nested_join_coll_names.emplace_back(coll->name);
            }

            // NOTE(review): ref_sort_fields_std entries are appended to sort_fields_std
            // before sort_validation_op is checked below — presumably harmless because
            // the caller discards sort_fields_std on error; confirm.
            for (auto& ref_sort_field_std: ref_sort_fields_std) {
                ref_sort_field_std.reference_collection_name = ref_collection_name;
                ref_sort_field_std.nested_join_collection_names.insert(
                        ref_sort_field_std.nested_join_collection_names.begin(),
                        nested_join_coll_names.begin(), nested_join_coll_names.end());
                sort_fields_std.emplace_back(ref_sort_field_std);
            }

            if (!sort_validation_op.ok()) {
                return Option<bool>(sort_validation_op.code(),
                                    "Referenced collection `" + ref_collection_name + "`: " + sort_validation_op.error());
            }
            continue;
        } else if (_sort_field.name == sort_field_const::eval) {
            // _eval(...) sort: each expression is parsed into a filter tree
            sort_fields_std.emplace_back(sort_field_const::eval, _sort_field.order);
            auto& sort_field_std = sort_fields_std.back();
            auto const& count = _sort_field.eval_expressions.size();
            sort_field_std.eval.filter_trees = new filter_node_t*[count]{nullptr};
            sort_field_std.eval_expressions = _sort_field.eval_expressions;
            sort_field_std.eval.scores = _sort_field.eval.scores;
            for (uint32_t j = 0; j < count; j++) {
                auto const& filter_exp = _sort_field.eval_expressions[j];
                if (filter_exp.empty()) {
                    return Option<bool>(400, "The eval expression in sort_by is empty.");
                }
                Option<bool> parse_filter_op = filter::parse_filter_query(filter_exp, search_schema, store, "",
                                                                          sort_field_std.eval.filter_trees[j]);
                if (!parse_filter_op.ok()) {
                    return Option<bool>(parse_filter_op.code(), "Error parsing eval expression in sort_by clause.");
                }
            }
            eval_sort_count++;
            continue;
        } else if(_sort_field.name == sort_field_const::random_order) {
            // bare _rand without parentheses: seed from current time
            sort_fields_std.emplace_back(_sort_field.name, _sort_field.order);
            auto& sort_field_std = sort_fields_std.back();
            uint32_t seed = time(nullptr);
            sort_field_std.random_sort.initialize(seed);
            continue;
        }

        sort_by sort_field_std(_sort_field.name, _sort_field.order);

        if(sort_field_std.name.back() == ')') {
            // check if this is a geo field or text match field
            size_t paran_start = 0;
            while(paran_start < sort_field_std.name.size() && sort_field_std.name[paran_start] != '(') {
                paran_start++;
            }
            const std::string& actual_field_name = sort_field_std.name.substr(0, paran_start);
            const auto field_it = search_schema.find(actual_field_name);

            if(actual_field_name == sort_field_const::text_match) {
                // _text_match(buckets:N)
                std::vector<std::string> match_parts;
                const std::string& match_config = sort_field_std.name.substr(paran_start+1,
                                                                             sort_field_std.name.size() - paran_start - 2);
                StringUtils::split(match_config, match_parts, ":");
                if(match_parts.size() != 2 || match_parts[0] != "buckets") {
                    return Option<bool>(400, "Invalid sorting parameter passed for _text_match.");
                }
                if(!StringUtils::is_uint32_t(match_parts[1])) {
                    return Option<bool>(400, "Invalid value passed for _text_match `buckets` configuration.");
                }
                sort_field_std.name = actual_field_name;
                sort_field_std.text_match_buckets = std::stoll(match_parts[1]);
            } else if(actual_field_name == sort_field_const::vector_query) {
                // _vector_query(...) embedded in sort_by
                const std::string& vector_query_str = sort_field_std.name.substr(paran_start + 1,
                                                                                 sort_field_std.name.size() - paran_start - 2);
                if(vector_query_str.empty()) {
                    return Option<bool>(400, "The vector query in sort_by is empty.");
                }
                auto parse_vector_op = VectorQueryOps::parse_vector_query_str(vector_query_str,
                                                                              sort_field_std.vector_query.query,
                                                                              is_wildcard_query, this, true);
                if(!parse_vector_op.ok()) {
                    return Option<bool>(400, parse_vector_op.error());
                }
                auto vector_field_it = search_schema.find(sort_field_std.vector_query.query.field_name);
                if(vector_field_it == search_schema.end() || vector_field_it.value().num_dim == 0) {
                    return Option<bool>(400, "Could not find a field named `" +
                                             sort_field_std.vector_query.query.field_name + "` in vector index.");
                }

                if(!sort_field_std.vector_query.query.queries.empty()) {
                    // `queries:` sub-param — embed each query string, then combine
                    if(embedding_fields.find(sort_field_std.vector_query.query.field_name) == embedding_fields.end()) {
                        return Option<bool>(400, "`queries` parameter is only supported for auto-embedding fields.");
                    }
                    std::vector<std::vector<float>> embeddings;
                    for(const auto& q: sort_field_std.vector_query.query.queries) {
                        EmbedderManager& embedder_manager = EmbedderManager::get_instance();
                        auto embedder_op = embedder_manager.get_text_embedder(vector_field_it.value().embed[fields::model_config]);
                        if(!embedder_op.ok()) {
                            return Option<bool>(400, embedder_op.error());
                        }
                        // enforce the overall search deadline before each (possibly remote) embed call
                        auto remote_embedding_timeout_us = remote_embedding_timeout_ms * 1000;
                        if((std::chrono::duration_cast<std::chrono::microseconds>(
                                std::chrono::system_clock::now().time_since_epoch()).count() - search_begin_us) > remote_embedding_timeout_us) {
                            std::string error = "Request timed out.";
                            return Option<bool>(500, error);
                        }
                        auto embedder = embedder_op.get();
                        if(embedder->is_remote()) {
                            if(remote_embedding_num_tries == 0) {
                                std::string error = "`remote_embedding_num_tries` must be greater than 0.";
                                return Option<bool>(400, error);
                            }
                        }
                        std::string embed_query = embedder_manager.get_query_prefix(vector_field_it.value().embed[fields::model_config]) + q;
                        auto embedding_op = embedder->Embed(embed_query, remote_embedding_timeout_ms, remote_embedding_num_tries);
                        if(!embedding_op.success) {
                            if(embedding_op.error.contains("error")) {
                                return Option<bool>(400, embedding_op.error["error"].get<std::string>());
                            } else {
                                return Option<bool>(400, embedding_op.error.dump());
                            }
                        }
                        embeddings.emplace_back(embedding_op.embedding);
                    }

                    if(sort_field_std.vector_query.query.query_weights.empty()) {
                        // get average of all embeddings
                        std::vector<float> avg_embedding(vector_field_it.value().num_dim, 0);
                        for(const auto& embedding: embeddings) {
                            for(size_t i = 0; i < embedding.size(); i++) {
                                avg_embedding[i] += embedding[i];
                            }
                        }
                        for(size_t i = 0; i < avg_embedding.size(); i++) {
                            avg_embedding[i] /= embeddings.size();
                        }
                        sort_field_std.vector_query.query.values = avg_embedding;
                    } else {
                        // weighted sum of embeddings using the supplied per-query weights
                        std::vector<float> weighted_embeddings(vector_field_it.value().num_dim, 0);
                        for(size_t i = 0; i < embeddings.size(); i++) {
                            for(size_t j = 0; j < embeddings[i].size(); j++) {
                                weighted_embeddings[j] += embeddings[i][j] * sort_field_std.vector_query.query.query_weights[i];
                            }
                        }
                        sort_field_std.vector_query.query.values = weighted_embeddings;
                    }
                }

                if(sort_field_std.vector_query.query.values.empty() &&
                   embedding_fields.find(sort_field_std.vector_query.query.field_name) != embedding_fields.end()) {
                    // generate embeddings for the query
                    EmbedderManager& embedder_manager = EmbedderManager::get_instance();
                    auto embedder_op = embedder_manager.get_text_embedder(vector_field_it.value().embed[fields::model_config]);
                    if(!embedder_op.ok()) {
                        return Option<bool>(embedder_op.code(), embedder_op.error());
                    }
                    auto embedder = embedder_op.get();
                    if(embedder->is_remote() && remote_embedding_num_tries == 0) {
                        std::string error = "`remote_embedding_num_tries` must be greater than 0.";
                        return Option<bool>(400, error);
                    }
                    std::string embed_query = embedder_manager.get_query_prefix(vector_field_it.value().embed[fields::model_config]) + query;
                    auto embedding_op = embedder->Embed(embed_query, remote_embedding_timeout_ms, remote_embedding_num_tries);
                    if(!embedding_op.success) {
                        if(embedding_op.error.contains("error")) {
                            return Option<bool>(400, embedding_op.error["error"].get<std::string>());
                        } else {
                            return Option<bool>(400, embedding_op.error.dump());
                        }
                    }
                    sort_field_std.vector_query.query.values = embedding_op.embedding;
                }

                const auto& vector_index_map = index->_get_vector_index();
                if(vector_index_map.find(sort_field_std.vector_query.query.field_name) == vector_index_map.end()) {
                    return Option<bool>(400, "Field `" + sort_field_std.vector_query.query.field_name +
                                             "` does not have a vector index.");
                }
                if(vector_field_it.value().num_dim != sort_field_std.vector_query.query.values.size()) {
                    return Option<bool>(400, "Query field `" + sort_field_std.vector_query.query.field_name +
                                             "` must have " + std::to_string(vector_field_it.value().num_dim) + " dimensions.");
                }
                sort_field_std.vector_query.vector_index = vector_index_map.at(sort_field_std.vector_query.query.field_name);
                if(sort_field_std.vector_query.vector_index->distance_type == cosine) {
                    // cosine distance requires normalized query vectors
                    std::vector<float> normalized_values(sort_field_std.vector_query.query.values.size());
                    hnsw_index_t::normalize_vector(sort_field_std.vector_query.query.values, normalized_values);
                    sort_field_std.vector_query.query.values = normalized_values;
                }
                sort_field_std.name = actual_field_name;
            } else if(actual_field_name == sort_field_const::random_order) {
                // _rand(seed) — optional explicit seed, must be a positive integer
                const std::string &random_sort_str = sort_field_std.name.substr(paran_start + 1,
                                                                                sort_field_std.name.size() - paran_start -2);
                uint32_t seed = time(nullptr);
                if (!random_sort_str.empty()) {
                    if(random_sort_str[0] == '-' || !StringUtils::is_uint32_t(random_sort_str)) {
                        return Option<bool>(400, "Only positive integer seed value is allowed.");
                    }
                    seed = static_cast<uint32_t>(std::stoul(random_sort_str));
                }
                sort_field_std.random_sort.initialize(seed);
                sort_field_std.name = actual_field_name;
            } else {
                if(field_it == search_schema.end()) {
                    std::string error = "Could not find a field named `" + actual_field_name + "` in the schema for sorting.";
                    return Option<bool>(404, error);
                }
                std::string error = "Bad syntax for sorting field `" + actual_field_name + "`";
                if(!field_it.value().is_geopoint()) {
                    // check for null value order
                    // parse "key:value" params: func/scale/origin/offset/decay/missing_values
                    const std::string &sort_params_str = sort_field_std.name.substr(paran_start + 1,
                                                                                    sort_field_std.name.size() - paran_start - 2);
                    std::vector<std::string> value_params, param_parts;
                    StringUtils::split(sort_params_str, value_params, ",");
                    for(const auto& value_param : value_params) {
                        param_parts.clear();
                        StringUtils::split(value_param, param_parts, ":");
                        if (param_parts.size() != 2) {
                            return Option<bool>(400, error);
                        }
                        if(param_parts[0] == sort_field_const::func) {
                            if(param_parts[1]!= sort_field_const::gauss && param_parts[1]!= sort_field_const::exp &&
                               param_parts[1]!= sort_field_const::linear && param_parts[1]!= sort_field_const::diff) {
                                return Option<bool>(400, "Bad syntax. Not a valid decay function key `" + param_parts[1] + "`.");
                            }
                            auto action_op = magic_enum::enum_cast<sort_by::sort_by_params_t>(param_parts[1]);
                            if(action_op.has_value()) {
                                sort_field_std.sort_by_param = action_op.value();
                            }
                        } else if(param_parts[0] == sort_field_const::scale) {
                            if (!StringUtils::is_integer(param_parts[1]) || param_parts[1] == "0") {
                                return Option<bool>(400, "sort_by: scale param should be non-zero integer.");
                            }
                            sort_field_std.scale = std::stoll(param_parts[1]);
                        } else if(param_parts[0] == sort_field_const::origin) {
                            if (!StringUtils::is_integer(param_parts[1])) {
                                return Option<bool>(400, "sort_by: origin param should be integer.");
                            }
                            sort_field_std.origin_val = std::stoll(param_parts[1]);
                        } else if(param_parts[0] == sort_field_const::offset) {
                            if (!StringUtils::is_integer(param_parts[1])) {
                                return Option<bool>(400, "sort_by: offset param should be integer.");
                            }
                            sort_field_std.offset = std::stoll(param_parts[1]);
                        } else if(param_parts[0] == sort_field_const::decay) {
                            if (!StringUtils::is_float(param_parts[1])) {
                                return Option<bool>(400, "sort_by: decay param should be float.");
                            }
                            auto val = std::stof(param_parts[1]);
                            if(val < 0.0f || val > 1.0f) {
                                return Option<bool>(400, "sort_by: decay param should be float in range [0.0, 1.0].");
                            }
                            sort_field_std.decay_val = val;
                        } else {
                            if (param_parts[0] != sort_field_const::missing_values) {
                                return Option<bool>(400, error);
                            }
                            auto missing_values_op = magic_enum::enum_cast<sort_by::missing_values_t>(param_parts[1]);
                            if (missing_values_op.has_value()) {
                                sort_field_std.missing_values = missing_values_op.value();
                            } else {
                                return Option<bool>(400, error);
                            }
                        }
                    }
                    // cross-param validation: decay functions need origin+scale, diff needs origin,
                    // and origin without a func param is an error (origin_val defaults to INT64_MAX)
                    if((sort_field_std.sort_by_param == sort_by::linear || sort_field_std.sort_by_param == sort_by::exp ||
                        sort_field_std.sort_by_param == sort_by::gauss) &&
                       (sort_field_std.origin_val == INT64_MAX || sort_field_std.scale == INT64_MAX)) {
                        return Option<bool>(400, "Bad syntax. origin and scale are mandatory params for decay function " +
                                                 std::string(magic_enum::enum_name(sort_field_std.sort_by_param)));
                    } else if(sort_field_std.sort_by_param == sort_by::diff && sort_field_std.origin_val == INT64_MAX) {
                        return Option<bool>(400, "Bad syntax. origin param is mandatory for diff function.");
                    } else if(sort_field_std.sort_by_param != sort_by::linear && sort_field_std.sort_by_param != sort_by::exp &&
                              sort_field_std.sort_by_param != sort_by::gauss && sort_field_std.sort_by_param != sort_by::diff &&
                              sort_field_std.origin_val != INT64_MAX) {
                        return Option<bool>(400, "Bad syntax. Missing param `func`.");
                    }
                } else {
                    const std::string& geo_coordstr = sort_field_std.name.substr(paran_start+1,
                                                                                 sort_field_std.name.size() - paran_start - 2);
                    // e.g. geopoint_field(lat1, lng1, exclude_radius: 10 miles)
                    std::vector<std::string> geo_parts;
                    StringUtils::split(geo_coordstr, geo_parts, ",");
                    if(geo_parts.size() != 2 && geo_parts.size() != 3) {
                        return Option<bool>(400, error);
                    }
                    if(!StringUtils::is_float(geo_parts[0]) || !StringUtils::is_float(geo_parts[1])) {
                        return Option<bool>(400, error);
                    }
                    if(geo_parts.size() == 3) {
                        // try to parse the exclude radius option
                        bool is_exclude_option = false;
                        if(StringUtils::begins_with(geo_parts[2], sort_field_const::exclude_radius)) {
                            is_exclude_option = true;
                        } else if(StringUtils::begins_with(geo_parts[2], sort_field_const::precision)) {
                            is_exclude_option = false;
                        } else {
                            return Option<bool>(400, error);
                        }
                        std::vector<std::string> param_parts;
                        StringUtils::split(geo_parts[2], param_parts, ":");
                        if(param_parts.size() != 2) {
                            return Option<bool>(400, error);
                        }
                        // param_parts[1] is the value, in either "20km" or "20 km" format
                        if(param_parts[1].size() < 2) {
                            return Option<bool>(400, error);
                        }
                        std::string unit = param_parts[1].substr(param_parts[1].size()-2, 2);
                        if(unit != "km" && unit != "mi") {
                            return Option<bool>(400, "Sort field's parameter unit must be either `km` or `mi`.");
                        }
                        std::vector<std::string> dist_values;
                        StringUtils::split(param_parts[1], dist_values, unit);
                        if(dist_values.size() != 1) {
                            return Option<bool>(400, error);
                        }
                        if(!StringUtils::is_float(dist_values[0])) {
                            return Option<bool>(400, error);
                        }
                        // convert to meters; truncation to int32 is deliberate here
                        int32_t value_meters;
                        if(unit == "km") {
                            value_meters = std::stof(dist_values[0]) * 1000;
                        } else if(unit == "mi") {
                            value_meters = std::stof(dist_values[0]) * 1609.34;
                        } else {
                            return Option<bool>(400, "Sort field's parameter unit must be either `km` or `mi`.");
                        }
                        if(value_meters <= 0) {
                            return Option<bool>(400, "Sort field's parameter must be a positive number.");
                        }
                        if(is_exclude_option) {
                            sort_field_std.exclude_radius = value_meters;
                        } else {
                            sort_field_std.geo_precision = value_meters;
                        }
                    }
                    double lat = std::stod(geo_parts[0]);
                    double lng = std::stod(geo_parts[1]);
                    int64_t lat_lng = GeoPoint::pack_lat_lng(lat, lng);
                    sort_field_std.geopoint = lat_lng;
                }
                sort_field_std.name = actual_field_name;
            }
        }

        // plain field sorts must exist in the schema and be both sortable and indexed
        if (sort_field_std.name != sort_field_const::text_match && sort_field_std.name != sort_field_const::eval &&
            sort_field_std.name != sort_field_const::seq_id && sort_field_std.name != sort_field_const::group_found &&
            sort_field_std.name != sort_field_const::vector_distance && sort_field_std.name != sort_field_const::vector_query &&
            sort_field_std.name != sort_field_const::random_order) {
            const auto field_it = search_schema.find(sort_field_std.name);
            if(field_it == search_schema.end() || !field_it.value().sort || !field_it.value().index) {
                std::string error = "Could not find a field named `" + sort_field_std.name + "` in the schema for sorting.";
                return Option<bool>(404, error);
            }
        }

        if(sort_field_std.name == sort_field_const::group_found && is_group_by_query == false) {
            std::string error = "group_by parameters should not be empty when using sort_by group_found";
            return Option<bool>(404, error);
        }

        if(sort_field_std.name == sort_field_const::vector_distance && !is_vector_query) {
            std::string error = "sort_by vector_distance is only supported for vector queries, semantic search and hybrid search.";
            return Option<bool>(404, error);
        }

        StringUtils::toupper(sort_field_std.order);
        if(sort_field_std.order != sort_field_const::asc && sort_field_std.order != sort_field_const::desc) {
            std::string error = "Order for field` " + sort_field_std.name + "` should be either ASC or DESC.";
            return Option<bool>(400, error);
        }

        sort_fields_std.emplace_back(sort_field_std);
    }

    if (is_reference_sort) {
        // reference sorts skip the default-field augmentation below
        if (eval_sort_count > 1) {
            std::string message = "Only one sorting eval expression is allowed.";
            return Option<bool>(422, message);
        }
        return Option<bool>(true);
    }

    /*
      1. Empty: [match_score, dsf] upstream
      2. ONE  : [usf, match_score]
      3. TWO  : [usf1, usf2, match_score]
      4. THREE: do nothing
    */
    if(sort_fields_std.empty()) {
        if(!is_wildcard_query) {
            sort_fields_std.emplace_back(sort_field_const::text_match, sort_field_const::desc);
        }
        if(is_vector_query) {
            sort_fields_std.emplace_back(sort_field_const::vector_distance, sort_field_const::asc);
        }
        if(!default_sorting_field.empty()) {
            auto def_it = search_schema.find(default_sorting_field);
            if(def_it == search_schema.end() || !def_it->index) {
                return Option<bool>(400, "Default sorting field not found in the schema or it has been marked as a "
                                         "non-indexed field.");
            }
            sort_fields_std.emplace_back(default_sorting_field, sort_field_const::desc);
        } else {
            sort_fields_std.emplace_back(sort_field_const::seq_id, sort_field_const::desc);
        }
    }

    bool found_match_score = false;
    bool found_vector_distance = false;
    for(const auto & sort_field : sort_fields_std) {
        if(sort_field.name == sort_field_const::text_match) {
            found_match_score = true;
        }
        if(sort_field.name == sort_field_const::vector_distance) {
            found_vector_distance = true;
        }
        if(found_match_score && found_vector_distance) {
            break;
        }
    }

    if(!found_match_score && !is_wildcard_query && sort_fields_std.size() < 3) {
        sort_fields_std.emplace_back(sort_field_const::text_match, sort_field_const::desc);
    }

    // only add vector_distance if it is a semantic search, do not add it for hybrid search
if(!found_vector_distance && is_vector_query && is_wildcard_query && sort_fields_std.size() < 3) { sort_fields_std.emplace_back(sort_field_const::vector_distance, sort_field_const::asc); } if(sort_fields_std.size() > 3) { std::string message = "Only upto 3 sort_by fields can be specified."; return Option<bool>(422, message); } if(eval_sort_count > 1) { std::string message = "Only one sorting eval expression is allowed."; return Option<bool>(422, message); } return Option<bool>(true); } Option<bool> Collection::extract_field_name(const std::string& field_name, const tsl::htrie_map<char, field>& search_schema, std::vector<std::string>& processed_search_fields, const bool extract_only_string_fields, const bool enable_nested_fields, const bool handle_wildcard, const bool& include_id) { // Reference to other collection if (field_name[0] == '$') { processed_search_fields.push_back(field_name); return Option<bool>(true); } if(field_name == "id") { processed_search_fields.push_back(field_name); return Option<bool>(true); } bool is_wildcard = field_name.find('*') != std::string::npos; if (is_wildcard && !handle_wildcard) { return Option<bool>(400, "Pattern `" + field_name + "` is not allowed."); } if (is_wildcard && include_id && field_name.size() < 4 && (field_name == "*" || field_name == "i*" || field_name == "id*")) { processed_search_fields.emplace_back("id"); } // If wildcard, remove * auto prefix_it = search_schema.equal_prefix_range(field_name.substr(0, field_name.size() - is_wildcard)); bool field_found = false; for(auto kv = prefix_it.first; kv != prefix_it.second; ++kv) { const bool exact_key_match = (kv.key().size() == field_name.size()); const bool exact_primitive_match = exact_key_match && !kv.value().is_object(); const bool text_embedding = kv.value().type == field_types::FLOAT_ARRAY && kv.value().num_dim > 0; if(extract_only_string_fields && !kv.value().is_string() && !text_embedding) { if(exact_primitive_match && !is_wildcard) { // upstream needs to be 
returned an error return Option<bool>(400, "Field `" + field_name + "` should be a string or a string array."); } continue; } // Prefix matches should only be included if it is a wildcard field name or if the matched field is nested. // If we have the fields `title`, `title_ko`, and `title.foo`, and `title` is passed, it should only match `title` // and `title.foo` fields. `title*` should match all the fields. const bool is_nested_field = kv.value().nested; if(!exact_key_match && !is_wildcard && !is_nested_field) { continue; } if (exact_primitive_match || (is_wildcard && kv->index) || text_embedding || // field_name prefix must be followed by a "." to indicate an object search (enable_nested_fields && kv.key().size() > field_name.size() && kv.key()[field_name.size()] == '.')) { processed_search_fields.push_back(kv.key()); field_found = true; } } if (is_wildcard && extract_only_string_fields && !field_found) { std::string error = "No string or string array field found matching the pattern `" + field_name + "` in the schema."; return Option<bool>(404, error); } else if (!field_found) { std::string error = is_wildcard ? 
"No field found matching the pattern `" : "Could not find a field named `" + field_name + "` in the schema."; return Option<bool>(404, error); } return Option<bool>(true); } Option<int64_t> Collection::get_geo_distance_with_lock(const std::string& geo_field_name, const uint32_t& seq_id, const S2LatLng& reference_lat_lng, const bool& round_distance) const { std::shared_lock lock(mutex); return index->get_geo_distance_with_lock(geo_field_name, seq_id, reference_lat_lng, round_distance); } Option<nlohmann::json> Collection::search(std::string raw_query, const std::vector<std::string>& raw_search_fields, const std::string & filter_query, const std::vector<std::string>& facet_fields, const std::vector<sort_by> & sort_fields, const std::vector<uint32_t>& num_typos, size_t per_page, const size_t page, token_ordering token_order, const std::vector<bool>& prefixes, const size_t drop_tokens_threshold, const spp::sparse_hash_set<std::string> & include_fields, const spp::sparse_hash_set<std::string> & exclude_fields, const size_t max_facet_values, const std::string & simple_facet_query, const size_t snippet_threshold, const size_t highlight_affix_num_tokens, const std::string& highlight_full_fields, size_t typo_tokens_threshold, const std::string& pinned_hits_str, const std::string& hidden_hits_str, const std::vector<std::string>& raw_group_by_fields, size_t group_limit, const std::string& highlight_start_tag, const std::string& highlight_end_tag, std::vector<uint32_t> raw_query_by_weights, size_t limit_hits, bool prioritize_exact_match, bool pre_segmented_query, bool enable_overrides, const std::string& highlight_fields, const bool exhaustive_search, const size_t search_stop_millis, const size_t min_len_1typo, const size_t min_len_2typo, enable_t split_join_tokens, const size_t max_candidates, const std::vector<enable_t>& infixes, const size_t max_extra_prefix, const size_t max_extra_suffix, const size_t facet_query_num_typos, const bool filter_curated_hits_option, const bool 
prioritize_token_position, const std::string& vector_query_str, const bool enable_highlight_v1, const uint64_t search_time_start_us, const text_match_type_t match_type, const size_t facet_sample_percent, const size_t facet_sample_threshold, const size_t page_offset, const std::string& facet_index_type, const size_t remote_embedding_timeout_ms, const size_t remote_embedding_num_tries, const std::string& stopwords_set, const std::vector<std::string>& facet_return_parent, const std::vector<ref_include_exclude_fields>& ref_include_exclude_fields_vec, const std::string& drop_tokens_mode, const bool prioritize_num_matching_fields, const bool group_missing_values, const bool conversation, const std::string& conversation_model_id, std::string conversation_id, const std::string& override_tags_str, const std::string& voice_query, bool enable_typos_for_numerical_tokens, bool enable_synonyms, bool synonym_prefix, uint32_t synonyms_num_typos, bool enable_lazy_filter, bool enable_typos_for_alpha_numerical_tokens, const size_t& max_filter_by_candidates, bool rerank_hybrid_matches, bool validate_field_names) const { std::shared_lock lock(mutex); // setup thread local vars search_stop_us = search_stop_millis * 1000; search_begin_us = (search_time_start_us != 0) ? 
search_time_start_us : std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::system_clock::now().time_since_epoch()).count(); search_cutoff = false; if(raw_query != "*" && raw_search_fields.empty()) { return Option<nlohmann::json>(400, "No search fields specified for the query."); } if(!raw_search_fields.empty() && !raw_query_by_weights.empty() && raw_search_fields.size() != raw_query_by_weights.size()) { return Option<nlohmann::json>(400, "Number of weights in `query_by_weights` does not match " "number of `query_by` fields."); } if(!raw_group_by_fields.empty() && (group_limit == 0 || group_limit > GROUP_LIMIT_MAX)) { return Option<nlohmann::json>(400, "Value of `group_limit` must be between 1 and " + std::to_string(GROUP_LIMIT_MAX) + "."); } if(!raw_search_fields.empty() && raw_search_fields.size() != num_typos.size()) { if(num_typos.size() != 1) { return Option<nlohmann::json>(400, "Number of values in `num_typos` does not match " "number of `query_by` fields."); } } if(!raw_search_fields.empty() && raw_search_fields.size() != prefixes.size()) { if(prefixes.size() != 1) { return Option<nlohmann::json>(400, "Number of prefix values in `prefix` does not match " "number of `query_by` fields."); } } if(!raw_search_fields.empty() && raw_search_fields.size() != infixes.size()) { if(infixes.size() != 1) { return Option<nlohmann::json>(400, "Number of infix values in `infix` does not match " "number of `query_by` fields."); } } if(facet_sample_percent > 100) { return Option<nlohmann::json>(400, "Value of `facet_sample_percent` must be less than 100."); } if(synonyms_num_typos > 2) { return Option<nlohmann::json>(400, "Value of `synonym_num_typos` must not be greater than 2."); } if(raw_group_by_fields.empty()) { group_limit = 0; } vector_query_t vector_query; if(!vector_query_str.empty()) { bool is_wildcard_query = (raw_query == "*" || raw_query.empty()); auto parse_vector_op = parse_and_validate_vector_query(vector_query_str, vector_query, 
is_wildcard_query, remote_embedding_timeout_ms, remote_embedding_num_tries, per_page); if(!parse_vector_op.ok()) { return Option<nlohmann::json>(parse_vector_op.code(), parse_vector_op.error()); } } // validate search fields std::vector<search_field_t> processed_search_fields; std::vector<uint32_t> query_by_weights; size_t num_embed_fields = 0; std::string query = raw_query; std::string transcribed_query; std::string conversation_standalone_query = raw_query; if(!voice_query.empty()) { if(!vq_model) { return Option<nlohmann::json>(400, "Voice query is not enabled. Please set `voice_query_model` for this collection."); } auto transcribe_res = vq_model->transcribe(voice_query); if(!transcribe_res.ok()) { return Option<nlohmann::json>(transcribe_res.code(), transcribe_res.error()); } query = transcribe_res.get(); transcribed_query = query; } if(conversation) { if(conversation_model_id.empty()) { return Option<nlohmann::json>(400, "Conversation is enabled but no conversation model ID is provided."); } auto conversation_model_op = ConversationModelManager::get_model(conversation_model_id); if(!conversation_model_op.ok()) { return Option<nlohmann::json>(400, conversation_model_op.error()); } } if(!conversation_id.empty()) { if(!conversation) { return Option<nlohmann::json>(400, "Conversation ID provided but conversation is not enabled for this collection."); } auto conversation_history_op = ConversationManager::get_instance().get_conversation(conversation_id); if(!conversation_history_op.ok()) { return Option<nlohmann::json>(400, conversation_history_op.error()); } auto conversation_history = conversation_history_op.get(); auto conversation_model_op = ConversationModelManager::get_model(conversation_model_id); auto standalone_question_op = ConversationModel::get_standalone_question(conversation_history, raw_query, conversation_model_op.get()); if(!standalone_question_op.ok()) { return Option<nlohmann::json>(400, standalone_question_op.error()); } query = 
standalone_question_op.get(); conversation_standalone_query = query; } bool ignored_missing_fields = false; for(size_t i = 0; i < raw_search_fields.size(); i++) { const std::string& field_name = raw_search_fields[i]; if(field_name == "id") { // `id` field needs to be handled separately, we will not handle for now std::string error = "Cannot use `id` as a query by field."; return Option<nlohmann::json>(400, error); } else if (field_name[0] == '$' && field_name.find('(') != std::string::npos && field_name.find(')') != std::string::npos) { return Option<nlohmann::json>(400, "Query by reference is not yet supported."); } std::vector<std::string> expanded_search_fields; auto field_op = extract_field_name(field_name, search_schema, expanded_search_fields, true, enable_nested_fields); if(!field_op.ok()) { if(field_op.code() == 404 && !validate_field_names) { ignored_missing_fields = true; continue; } return Option<nlohmann::json>(field_op.code(), field_op.error()); } for(const auto& expanded_search_field: expanded_search_fields) { if (search_schema.count(expanded_search_field) == 0) { return Option<nlohmann::json>(404, "Could not find `" + expanded_search_field + "` field in the schema."); } auto search_field = search_schema.at(expanded_search_field); if(search_field.num_dim > 0) { num_embed_fields++; if(num_embed_fields > 1 || (!vector_query.field_name.empty() && search_field.name != vector_query.field_name)) { std::string error = "Only one embedding field is allowed in the query."; return Option<nlohmann::json>(400, error); } if(!search_field.index) { std::string error = "Field `" + search_field.name + "` is marked as a non-indexed field in the schema."; return Option<nlohmann::json>(400, error); } // if(EmbedderManager::model_dir.empty()) { // std::string error = "Text embedding is not enabled. 
Please set `model-dir` at startup."; // return Option<nlohmann::json>(400, error); // } if(query == "*") { // ignore embedding field if query is a wildcard continue; } if(embedding_fields.find(search_field.name) == embedding_fields.end()) { std::string error = "Vector field `" + search_field.name + "` is not an auto-embedding field, do not use `query_by` with it, use `vector_query` instead."; return Option<nlohmann::json>(400, error); } EmbedderManager& embedder_manager = EmbedderManager::get_instance(); auto embedder_op = embedder_manager.get_text_embedder(search_field.embed[fields::model_config]); if(!embedder_op.ok()) { return Option<nlohmann::json>(400, embedder_op.error()); } auto remote_embedding_timeout_us = remote_embedding_timeout_ms * 1000; if((std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::system_clock::now().time_since_epoch()).count() - search_begin_us) > remote_embedding_timeout_us) { std::string error = "Request timed out."; return Option<nlohmann::json>(500, error); } auto embedder = embedder_op.get(); if(embedder->is_remote()) { // return error if prefix search is used with openai embedder if((prefixes.size() == 1 && prefixes[0] == true) || (prefixes.size() > 1 && prefixes[i] == true)) { std::string error = "Prefix search is not supported for remote embedders. 
Please set `prefix=false` as an additional search parameter to disable prefix searching."; return Option<nlohmann::json>(400, error); } if(remote_embedding_num_tries == 0) { std::string error = "`remote_embedding_num_tries` must be greater than 0."; return Option<nlohmann::json>(400, error); } } std::string embed_query = embedder_manager.get_query_prefix(search_field.embed[fields::model_config]) + query; auto embedding_op = embedder->Embed(embed_query, remote_embedding_timeout_ms, remote_embedding_num_tries); if(!embedding_op.success) { if(embedding_op.error.contains("error")) { return Option<nlohmann::json>(400, embedding_op.error["error"].get<std::string>()); } else { return Option<nlohmann::json>(400, embedding_op.error.dump()); } } std::vector<float> embedding = embedding_op.embedding; // params could have been set for an embed field, so we take a backup and restore vector_query.values = embedding; vector_query.field_name = field_name; continue; } auto query_weight = !raw_query_by_weights.empty() ? raw_query_by_weights[i] : 0; auto num_typo = i < num_typos.size() ? num_typos[i] : num_typos[0]; auto prefix = i < prefixes.size() ? prefixes[i] : prefixes[0]; auto infix = i < infixes.size() ? 
infixes[i] : infixes[0]; processed_search_fields.emplace_back(expanded_search_field, search_field.faceted_name(), query_weight, num_typo, prefix, infix); if(!raw_query_by_weights.empty()) { query_by_weights.push_back(query_weight); } } } if(!vector_query.field_name.empty() && vector_query.values.empty() && num_embed_fields == 0) { std::string error = "Vector query could not find any embedded fields."; return Option<nlohmann::json>(400, error); } if(!query_by_weights.empty() && processed_search_fields.size() != query_by_weights.size()) { std::string error = "Error, query_by_weights.size != query_by.size."; return Option<nlohmann::json>(400, error); } for(const auto& processed_search_field: processed_search_fields) { const auto& field_name = processed_search_field.name; field search_field = search_schema.at(field_name); if(!search_field.index) { std::string error = "Field `" + field_name + "` is marked as a non-indexed field in the schema."; return Option<nlohmann::json>(400, error); } if(search_field.type != field_types::STRING && search_field.type != field_types::STRING_ARRAY) { std::string error = "Field `" + field_name + "` should be a string or a string array."; return Option<nlohmann::json>(400, error); } } // validate group by fields std::vector<std::string> group_by_fields; for(const std::string& field_name: raw_group_by_fields) { auto field_op = extract_field_name(field_name, search_schema, group_by_fields, false, enable_nested_fields, false); if(!field_op.ok()) { return Option<nlohmann::json>(404, field_op.error()); } } for(const std::string& field_name: group_by_fields) { if(field_name == "id") { std::string error = "Cannot use `id` as a group by field."; return Option<nlohmann::json>(400, error); } field search_field = search_schema.at(field_name); // must be a facet field if(!search_field.is_facet()) { std::string error = "Group by field `" + field_name + "` should be a facet field."; return Option<nlohmann::json>(400, error); } } tsl::htrie_set<char> 
include_fields_full; tsl::htrie_set<char> exclude_fields_full; auto include_exclude_op = populate_include_exclude_fields(include_fields, exclude_fields, include_fields_full, exclude_fields_full); if(!include_exclude_op.ok()) { return Option<nlohmann::json>(include_exclude_op.code(), include_exclude_op.error()); } // process weights for search fields std::vector<search_field_t> weighted_search_fields; process_search_field_weights(processed_search_fields, query_by_weights, weighted_search_fields); const std::string doc_id_prefix = std::to_string(collection_id) + "_" + DOC_ID_PREFIX + "_"; filter_node_t* filter_tree_root = nullptr; Option<bool> parse_filter_op = filter::parse_filter_query(filter_query, search_schema, store, doc_id_prefix, filter_tree_root); std::unique_ptr<filter_node_t> filter_tree_root_guard(filter_tree_root); if(!parse_filter_op.ok()) { return Option<nlohmann::json>(parse_filter_op.code(), parse_filter_op.error()); } std::vector<facet> facets; // validate facet fields for(const std::string & facet_field: facet_fields) { const auto& res = parse_facet(facet_field, facets); if(!res.ok()){ if(res.code() == 404 && !validate_field_names) { continue; } return Option<nlohmann::json>(res.code(), res.error()); } } std::vector<facet_index_type_t> facet_index_types; std::vector<std::string> facet_index_str_types; StringUtils::split(facet_index_type, facet_index_str_types, ","); if(facet_index_str_types.empty()) { for(size_t i = 0; i < facets.size(); i++) { facet_index_types.push_back(automatic); } } else if(facet_index_str_types.size() == 1) { auto match_op = magic_enum::enum_cast<facet_index_type_t>(facet_index_str_types[0]); if(!match_op.has_value()) { return Option<nlohmann::json>(400, "Invalid facet index type: " + facet_index_str_types[0]); } for(size_t i = 0; i < facets.size(); i++) { facet_index_types.push_back(match_op.value()); } } else { for(const auto& facet_index_str_type: facet_index_str_types) { auto match_op = 
magic_enum::enum_cast<facet_index_type_t>(facet_index_str_type); if(match_op.has_value()) { facet_index_types.push_back(match_op.value()); } else { return Option<nlohmann::json>(400, "Invalid facet index type: " + facet_index_str_type); } } } if(facets.size() != facet_index_types.size()) { return Option<nlohmann::json>(400, "Size of facet_index_type does not match size of facets."); } // parse facet query facet_query_t facet_query = {"", ""}; if(!simple_facet_query.empty()) { size_t found_colon_index = simple_facet_query.find(':'); if(found_colon_index == std::string::npos) { std::string error = "Facet query must be in the `facet_field: value` format."; return Option<nlohmann::json>(400, error); } if(facet_fields.empty()) { std::string error = "The `facet_query` parameter is supplied without a `facet_by` parameter."; return Option<nlohmann::json>(400, error); } std::string&& facet_query_fname = simple_facet_query.substr(0, found_colon_index); StringUtils::trim(facet_query_fname); std::string&& facet_query_value = simple_facet_query.substr(found_colon_index+1, std::string::npos); StringUtils::trim(facet_query_value); if(facet_query_value.empty()) { // empty facet value, we will treat it as no facet query facet_query = {"", ""}; } else { // facet query field must be part of facet fields requested facet_query = { StringUtils::trim(facet_query_fname), facet_query_value }; bool found = false; for(const auto& facet : facets) { if(facet.field_name == facet_query.field_name) { found=true; break; } } if(!found) { std::string error = "Facet query refers to a facet field `" + facet_query.field_name + "` " + "that is not part of `facet_by` parameter."; return Option<nlohmann::json>(400, error); } if(search_schema.count(facet_query.field_name) == 0 || !search_schema.at(facet_query.field_name).facet) { std::string error = "Could not find a facet field named `" + facet_query.field_name + "` in the schema."; return Option<nlohmann::json>(404, error); } } } int per_page_max = 
Config::get_instance().get_max_per_page(); if(per_page > per_page_max) { std::string message = "Only upto " + std::to_string(per_page_max) + " hits can be fetched per page."; return Option<nlohmann::json>(422, message); } size_t offset = 0; if(page == 0 && page_offset != 0) { // if only offset is set, use that offset = page_offset; } else { // if both are set or none set, use page value (default is 1) size_t actual_page = (page == 0) ? 1 : page; offset = (per_page * (actual_page - 1)); } size_t fetch_size = std::min<size_t>(offset + per_page, limit_hits); if(token_order == NOT_SET) { if(default_sorting_field.empty()) { token_order = FREQUENCY; } else { token_order = MAX_SCORE; } } Option<drop_tokens_param_t> drop_tokens_param_op = parse_drop_tokens_mode(drop_tokens_mode); if(!drop_tokens_param_op.ok()) { return Option<nlohmann::json>(drop_tokens_param_op.code(), drop_tokens_param_op.error()); } auto drop_tokens_param = drop_tokens_param_op.get(); size_t total = 0; std::vector<uint32_t> excluded_ids; std::vector<std::pair<uint32_t, uint32_t>> included_ids; // ID -> position std::map<size_t, std::vector<std::string>> pinned_hits; Option<bool> pinned_hits_op = parse_pinned_hits(pinned_hits_str, pinned_hits); if(!pinned_hits_op.ok()) { return Option<nlohmann::json>(400, pinned_hits_op.error()); } std::vector<std::string> hidden_hits; StringUtils::split(hidden_hits_str, hidden_hits, ","); nlohmann::json override_metadata; std::vector<const override_t*> filter_overrides; std::string curated_sort_by; std::set<std::string> override_tag_set; std::vector<std::string> override_tags_vec; StringUtils::split(override_tags_str, override_tags_vec, ","); for(const auto& tag: override_tags_vec) { override_tag_set.insert(tag); } bool filter_curated_hits_overrides = false; curate_results(query, filter_query, enable_overrides, pre_segmented_query, override_tag_set, pinned_hits, hidden_hits, included_ids, excluded_ids, filter_overrides, filter_curated_hits_overrides, curated_sort_by, 
override_metadata); bool filter_curated_hits = filter_curated_hits_option || filter_curated_hits_overrides; /*for(auto& kv: included_ids) { LOG(INFO) << "key: " << kv.first; for(auto val: kv.second) { LOG(INFO) << val; } } LOG(INFO) << "Excludes:"; for(auto id: excluded_ids) { LOG(INFO) << id; } LOG(INFO) << "included_ids size: " << included_ids.size(); for(auto& group: included_ids) { for(uint32_t& seq_id: group.second) { LOG(INFO) << "seq_id: " << seq_id; } LOG(INFO) << "----"; } */ // Set query to * if it is semantic search if(!vector_query.field_name.empty() && processed_search_fields.empty()) { query = "*"; } // validate sort fields and standardize sort_fields_guard_t sort_fields_guard; std::vector<sort_by>& sort_fields_std = sort_fields_guard.sort_fields_std; bool is_wildcard_query = (query == "*"); bool is_group_by_query = group_by_fields.size() > 0; bool is_vector_query = !vector_query.field_name.empty(); if(curated_sort_by.empty()) { auto sort_validation_op = validate_and_standardize_sort_fields(sort_fields, sort_fields_std, is_wildcard_query, is_vector_query, raw_query, is_group_by_query, remote_embedding_timeout_ms, remote_embedding_num_tries); if(!sort_validation_op.ok()) { return Option<nlohmann::json>(sort_validation_op.code(), sort_validation_op.error()); } } else { std::vector<sort_by> curated_sort_fields; bool parsed_sort_by = CollectionManager::parse_sort_by_str(curated_sort_by, curated_sort_fields); if(!parsed_sort_by) { return Option<nlohmann::json>(400, "Parameter `sort_by` is malformed."); } auto sort_validation_op = validate_and_standardize_sort_fields(curated_sort_fields, sort_fields_std, is_wildcard_query, is_vector_query, raw_query, is_group_by_query, remote_embedding_timeout_ms, remote_embedding_num_tries); if(!sort_validation_op.ok()) { return Option<nlohmann::json>(sort_validation_op.code(), sort_validation_op.error()); } } // apply bucketing on text match score int match_score_index = -1; for(size_t i = 0; i < sort_fields_std.size(); 
i++) { if(sort_fields_std[i].name == sort_field_const::text_match && sort_fields_std[i].text_match_buckets != 0) { match_score_index = i; break; } } //LOG(INFO) << "Num indices used for querying: " << indices.size(); std::vector<query_tokens_t> field_query_tokens; std::vector<std::string> q_tokens; // used for auxillary highlighting std::vector<std::string> q_include_tokens; std::vector<std::string> q_unstemmed_tokens; if(weighted_search_fields.size() == 0) { if(!ignored_missing_fields) { // has to be a wildcard query field_query_tokens.emplace_back(query_tokens_t{}); parse_search_query(query, q_include_tokens, q_unstemmed_tokens, field_query_tokens[0].q_exclude_tokens, field_query_tokens[0].q_phrases, "", false, stopwords_set); process_filter_overrides(filter_overrides, q_include_tokens, token_order, filter_tree_root, included_ids, excluded_ids, override_metadata, enable_typos_for_numerical_tokens, enable_typos_for_alpha_numerical_tokens); for(size_t i = 0; i < q_include_tokens.size(); i++) { auto& q_include_token = q_include_tokens[i]; field_query_tokens[0].q_include_tokens.emplace_back(i, q_include_token, (i == q_include_tokens.size() - 1), q_include_token.size(), 0); } for(size_t i = 0; i < q_unstemmed_tokens.size(); i++) { auto& q_include_token = q_unstemmed_tokens[i]; field_query_tokens[0].q_unstemmed_tokens.emplace_back(i, q_include_token, (i == q_include_tokens.size() - 1), q_include_token.size(), 0); } } } else { field_query_tokens.emplace_back(query_tokens_t{}); auto most_weighted_field = search_schema.at(weighted_search_fields[0].name); const std::string & field_locale = most_weighted_field.locale; parse_search_query(query, q_include_tokens, q_unstemmed_tokens, field_query_tokens[0].q_exclude_tokens, field_query_tokens[0].q_phrases, field_locale, pre_segmented_query, stopwords_set, most_weighted_field.get_stemmer()); // process filter overrides first, before synonyms (order is important) // included_ids, excluded_ids 
process_filter_overrides(filter_overrides, q_include_tokens, token_order, filter_tree_root, included_ids, excluded_ids, override_metadata, enable_typos_for_numerical_tokens, enable_typos_for_alpha_numerical_tokens); for(size_t i = 0; i < q_include_tokens.size(); i++) { auto& q_include_token = q_include_tokens[i]; q_tokens.push_back(q_include_token); field_query_tokens[0].q_include_tokens.emplace_back(i, q_include_token, (i == q_include_tokens.size() - 1), q_include_token.size(), 0); } for(size_t i = 0; i < q_unstemmed_tokens.size(); i++) { auto& q_include_token = q_unstemmed_tokens[i]; field_query_tokens[0].q_unstemmed_tokens.emplace_back(i, q_include_token, (i == q_include_tokens.size() - 1), q_include_token.size(), 0); } for(auto& phrase: field_query_tokens[0].q_phrases) { for(auto& token: phrase) { q_tokens.push_back(token); } } for(size_t i = 1; i < weighted_search_fields.size(); i++) { field_query_tokens.emplace_back(query_tokens_t{}); field_query_tokens[i] = field_query_tokens[0]; } } // search all indices size_t index_id = 0; search_args* search_params = new search_args(field_query_tokens, weighted_search_fields, match_type, filter_tree_root, facets, included_ids, excluded_ids, sort_fields_std, facet_query, num_typos, max_facet_values, fetch_size, per_page, offset, token_order, prefixes, drop_tokens_threshold, typo_tokens_threshold, group_by_fields, group_limit, group_missing_values, default_sorting_field, prioritize_exact_match, prioritize_token_position, prioritize_num_matching_fields, exhaustive_search, 4, search_stop_millis, min_len_1typo, min_len_2typo, max_candidates, infixes, max_extra_prefix, max_extra_suffix, facet_query_num_typos, filter_curated_hits, split_join_tokens, vector_query, facet_sample_percent, facet_sample_threshold, drop_tokens_param, enable_lazy_filter, max_filter_by_candidates); std::unique_ptr<search_args> search_params_guard(search_params); auto search_op = index->run_search(search_params, name, facet_index_types, 
enable_typos_for_numerical_tokens, enable_synonyms, synonym_prefix, synonyms_num_typos, enable_typos_for_alpha_numerical_tokens, rerank_hybrid_matches); // filter_tree_root might be updated in Index::static_filter_query_eval. filter_tree_root_guard.release(); filter_tree_root_guard.reset(filter_tree_root); if (!search_op.ok()) { return Option<nlohmann::json>(search_op.code(), search_op.error()); } auto& raw_result_kvs = search_params->raw_result_kvs; auto& override_result_kvs = search_params->override_result_kvs; // for grouping we have to aggregate group set sizes to a count value if(group_limit) { total = search_params->groups_processed.size() + override_result_kvs.size(); } else { total = search_params->all_result_ids_len; } if(search_cutoff && total == 0) { // this can happen if other requests stopped this request from being processed // we should return an error so that request can be retried by client return Option<nlohmann::json>(408, "Request Timeout"); } if(match_score_index >= 0 && sort_fields_std[match_score_index].text_match_buckets > 0) { size_t num_buckets = sort_fields_std[match_score_index].text_match_buckets; const size_t max_kvs_bucketed = std::min<size_t>(Index::DEFAULT_TOPSTER_SIZE, raw_result_kvs.size()); if(max_kvs_bucketed >= num_buckets) { spp::sparse_hash_map<uint64_t, int64_t> result_scores; // only first `max_kvs_bucketed` elements are bucketed to prevent pagination issues past 250 records size_t block_len = (max_kvs_bucketed / num_buckets); size_t i = 0; while(i < max_kvs_bucketed) { int64_t anchor_score = raw_result_kvs[i][0]->scores[raw_result_kvs[i][0]->match_score_index]; size_t j = 0; while(j < block_len && i+j < max_kvs_bucketed) { result_scores[raw_result_kvs[i+j][0]->key] = raw_result_kvs[i+j][0]->scores[raw_result_kvs[i+j][0]->match_score_index]; raw_result_kvs[i+j][0]->scores[raw_result_kvs[i+j][0]->match_score_index] = anchor_score; j++; } i += j; } // sort again based on bucketed match score 
std::partial_sort(raw_result_kvs.begin(), raw_result_kvs.begin() + max_kvs_bucketed, raw_result_kvs.end(), Topster::is_greater_kv_group); // restore original scores for(i = 0; i < max_kvs_bucketed; i++) { raw_result_kvs[i][0]->scores[raw_result_kvs[i][0]->match_score_index] = result_scores[raw_result_kvs[i][0]->key]; } } } // Sort based on position in overridden list std::sort( override_result_kvs.begin(), override_result_kvs.end(), [](const std::vector<KV*>& a, std::vector<KV*>& b) -> bool { return a[0]->distinct_key < b[0]->distinct_key; } ); std::vector<std::vector<KV*>> result_group_kvs; size_t override_kv_index = 0; size_t raw_results_index = 0; // merge raw results and override results while(raw_results_index < raw_result_kvs.size()) { if(override_kv_index < override_result_kvs.size()) { size_t result_position = result_group_kvs.size() + 1; uint64_t override_position = override_result_kvs[override_kv_index][0]->distinct_key; if(result_position == override_position) { override_result_kvs[override_kv_index][0]->match_score_index = CURATED_RECORD_IDENTIFIER; result_group_kvs.push_back(override_result_kvs[override_kv_index]); override_kv_index++; continue; } } result_group_kvs.push_back(raw_result_kvs[raw_results_index]); raw_results_index++; } while(override_kv_index < override_result_kvs.size()) { override_result_kvs[override_kv_index][0]->match_score_index = CURATED_RECORD_IDENTIFIER; result_group_kvs.push_back({override_result_kvs[override_kv_index]}); override_kv_index++; } std::string facet_query_last_token; size_t facet_query_num_tokens = 0; // used to identify drop token scenario if(!facet_query.query.empty()) { // identify facet hash tokens auto fq_field = search_schema.at(facet_query.field_name); bool is_cyrillic = Tokenizer::is_cyrillic(fq_field.locale); bool normalise = is_cyrillic ? 
false : true; std::vector<std::string> facet_query_tokens; Tokenizer(facet_query.query, normalise, !fq_field.is_string(), fq_field.locale, symbols_to_index, token_separators, fq_field.get_stemmer()).tokenize(facet_query_tokens); facet_query_num_tokens = facet_query_tokens.size(); facet_query_last_token = facet_query_tokens.empty() ? "" : facet_query_tokens.back(); } const long start_result_index = offset; // `end_result_index` could be -1, so use signed type const long end_result_index = std::min(fetch_size, result_group_kvs.size()) - 1; // handle which fields have to be highlighted std::vector<highlight_field_t> highlight_items; std::vector<std::string> highlight_field_names; StringUtils::split(highlight_fields, highlight_field_names, ","); std::vector<std::string> highlight_full_field_names; StringUtils::split(highlight_full_fields, highlight_full_field_names, ","); if(query != "*") { process_highlight_fields(weighted_search_fields, raw_search_fields, include_fields_full, exclude_fields_full, highlight_field_names, highlight_full_field_names, infixes, q_tokens, search_params->qtoken_set, highlight_items); } nlohmann::json result = nlohmann::json::object(); result["found"] = total; if(group_limit != 0) { result["found_docs"] = search_params->all_result_ids_len; } if(exclude_fields.count("out_of") == 0) { result["out_of"] = num_documents.load(); } std::string hits_key = group_limit ? 
"grouped_hits" : "hits"; result[hits_key] = nlohmann::json::array(); uint8_t index_symbols[256] = {}; for(char c: symbols_to_index) { index_symbols[uint8_t(c)] = 1; } nlohmann::json docs_array = nlohmann::json::array(); // handle analytics query expansion std::string first_q = raw_query; expand_search_query(raw_query, offset, total, search_params, result_group_kvs, raw_search_fields, first_q); // construct results array for(long result_kvs_index = start_result_index; result_kvs_index <= end_result_index; result_kvs_index++) { const std::vector<KV*> & kv_group = result_group_kvs[result_kvs_index]; nlohmann::json group_hits; if(group_limit) { group_hits["hits"] = nlohmann::json::array(); } nlohmann::json& hits_array = group_limit ? group_hits["hits"] : result["hits"]; nlohmann::json group_key = nlohmann::json::array(); for(const KV* field_order_kv: kv_group) { const std::string& seq_id_key = get_seq_id_key((uint32_t) field_order_kv->key); nlohmann::json document; const Option<bool> & document_op = get_document_from_store(seq_id_key, document); if(!document_op.ok()) { LOG(ERROR) << "Document fetch error. 
" << document_op.error(); continue; } nlohmann::json highlight_res = nlohmann::json::object(); if(!highlight_items.empty()) { copy_highlight_doc(highlight_items, enable_nested_fields, document, highlight_res); remove_flat_fields(highlight_res); remove_reference_helper_fields(highlight_res); highlight_res.erase("id"); } nlohmann::json wrapper_doc; if(enable_highlight_v1) { wrapper_doc["highlights"] = nlohmann::json::array(); } std::vector<highlight_t> highlights; StringUtils string_utils; tsl::htrie_set<char> hfield_names; tsl::htrie_set<char> h_full_field_names; for(size_t i = 0; i < highlight_items.size(); i++) { auto& highlight_item = highlight_items[i]; const std::string& field_name = highlight_item.name; if(search_schema.count(field_name) == 0) { continue; } field search_field = search_schema.at(field_name); if(query != "*") { highlight_t highlight; highlight.field = search_field.name; bool found_highlight = false; bool found_full_highlight = false; highlight_result(raw_query, search_field, i, highlight_item.qtoken_leaves, field_order_kv, document, highlight_res, string_utils, snippet_threshold, highlight_affix_num_tokens, highlight_item.fully_highlighted, highlight_item.infix, highlight_start_tag, highlight_end_tag, index_symbols, highlight, found_highlight, found_full_highlight); if(!highlight.snippets.empty()) { highlights.push_back(highlight); } if(found_highlight) { hfield_names.insert(search_field.name); if(found_full_highlight) { h_full_field_names.insert(search_field.name); } } } } // explicit highlight fields could be parent of searched fields, so we will take a pass at that for(auto& hfield_name: highlight_full_field_names) { auto it = h_full_field_names.equal_prefix_range(hfield_name); if(it.first != it.second) { h_full_field_names.insert(hfield_name); } } if(highlight_field_names.empty()) { for(auto& raw_search_field: raw_search_fields) { auto it = hfield_names.equal_prefix_range(raw_search_field); if(it.first != it.second) { 
hfield_names.insert(raw_search_field); } } } else { for(auto& hfield_name: highlight_field_names) { auto it = hfield_names.equal_prefix_range(hfield_name); if(it.first != it.second) { hfield_names.insert(hfield_name); } } } // remove fields from highlight doc that were not highlighted if(!hfield_names.empty()) { prune_doc(highlight_res, hfield_names, tsl::htrie_set<char>(), ""); } else { highlight_res.clear(); } if(enable_highlight_v1) { std::sort(highlights.begin(), highlights.end()); for(const auto & highlight: highlights) { auto field_it = search_schema.find(highlight.field); if(field_it == search_schema.end() || field_it->nested) { // nested field highlighting will be available only in the new highlight structure. continue; } nlohmann::json h_json = nlohmann::json::object(); h_json["field"] = highlight.field; if(!highlight.indices.empty()) { h_json["matched_tokens"] = highlight.matched_tokens; h_json["indices"] = highlight.indices; h_json["snippets"] = highlight.snippets; if(!highlight.values.empty()) { h_json["values"] = highlight.values; } } else { h_json["matched_tokens"] = highlight.matched_tokens[0]; h_json["snippet"] = highlight.snippets[0]; if(!highlight.values.empty() && !highlight.values[0].empty()) { h_json["value"] = highlight.values[0]; } } wrapper_doc["highlights"].push_back(h_json); } } //wrapper_doc["seq_id"] = (uint32_t) field_order_kv->key; if(group_limit && group_key.empty()) { for(const auto& field_name: group_by_fields) { if(document.count(field_name) != 0) { group_key.push_back(document[field_name]); } } } remove_flat_fields(document); remove_reference_helper_fields(document); auto prune_op = prune_doc(document, include_fields_full, exclude_fields_full, "", 0, field_order_kv->reference_filter_results, const_cast<Collection *>(this), get_seq_id_from_key(seq_id_key), ref_include_exclude_fields_vec); if (!prune_op.ok()) { return Option<nlohmann::json>(prune_op.code(), prune_op.error()); } if(conversation) { docs_array.push_back(document); } 
wrapper_doc["document"] = document; wrapper_doc["highlight"] = highlight_res; if(field_order_kv->match_score_index == CURATED_RECORD_IDENTIFIER) { wrapper_doc["curated"] = true; } else if(field_order_kv->match_score_index >= 0) { wrapper_doc["text_match"] = field_order_kv->text_match_score; wrapper_doc["text_match_info"] = nlohmann::json::object(); populate_text_match_info(wrapper_doc["text_match_info"], field_order_kv->text_match_score, match_type, field_query_tokens[0].q_include_tokens.size()); if(!vector_query.field_name.empty()) { wrapper_doc["hybrid_search_info"] = nlohmann::json::object(); wrapper_doc["hybrid_search_info"]["rank_fusion_score"] = Index::int64_t_to_float(field_order_kv->scores[field_order_kv->match_score_index]); } } nlohmann::json geo_distances; for(size_t sort_field_index = 0; sort_field_index < sort_fields_std.size(); sort_field_index++) { const auto& sort_field = sort_fields_std[sort_field_index]; if(sort_field.geopoint != 0 && sort_field.geo_precision != 0) { S2LatLng reference_lat_lng; GeoPoint::unpack_lat_lng(sort_field.geopoint, reference_lat_lng); auto get_geo_distance_op = !sort_field.reference_collection_name.empty() ? 
index->get_referenced_geo_distance(sort_field, field_order_kv->key, field_order_kv->reference_filter_results, reference_lat_lng, true) : index->get_geo_distance_with_lock(sort_field.name, field_order_kv->key, reference_lat_lng, true); if (!get_geo_distance_op.ok()) { return Option<nlohmann::json>(get_geo_distance_op.code(), get_geo_distance_op.error()); } geo_distances[sort_field.name] = get_geo_distance_op.get(); } else if(sort_field.geopoint != 0) { geo_distances[sort_field.name] = std::abs(field_order_kv->scores[sort_field_index]); } else if(sort_field.name == sort_field_const::vector_query && !sort_field.vector_query.query.field_name.empty()) { wrapper_doc["vector_distance"] = -Index::int64_t_to_float(field_order_kv->scores[sort_field_index]); } } if(!geo_distances.empty()) { wrapper_doc["geo_distance_meters"] = geo_distances; } if(!vector_query.field_name.empty() && field_order_kv->vector_distance >= 0) { wrapper_doc["vector_distance"] = field_order_kv->vector_distance; } hits_array.push_back(wrapper_doc); } if(group_limit) { group_hits["group_key"] = group_key; const auto& itr = search_params->groups_processed.find(kv_group[0]->distinct_key); if(itr != search_params->groups_processed.end()) { group_hits["found"] = itr->second; } result["grouped_hits"].push_back(group_hits); } } if(conversation) { result["conversation"] = nlohmann::json::object(); result["conversation"]["query"] = raw_query; // remove all fields with vector type from docs_array for(const auto& field : search_schema) { if(field.type == field_types::FLOAT_ARRAY && field.num_dim > 0) { for(auto& doc : docs_array) { doc.erase(field.name); } } } auto conversation_model = ConversationModelManager::get_model(conversation_model_id).get(); auto min_required_bytes_op = ConversationModel::get_minimum_required_bytes(conversation_model); if(!min_required_bytes_op.ok()) { return Option<nlohmann::json>(min_required_bytes_op.code(), min_required_bytes_op.error()); } auto min_required_bytes = 
min_required_bytes_op.get(); if(conversation_model["max_bytes"].get<size_t>() < min_required_bytes + conversation_standalone_query.size()) { return Option<nlohmann::json>(400, "`max_bytes` of the conversation model is less than the minimum required bytes(" + std::to_string(min_required_bytes) + ")."); } // remove document with lowest score until total tokens is less than MAX_TOKENS while(docs_array.dump(0).size() > conversation_model["max_bytes"].get<size_t>() - min_required_bytes - conversation_standalone_query.size()) { try { if(docs_array.empty()) { break; } docs_array.erase(docs_array.size() - 1); } catch(...) { return Option<nlohmann::json>(400, "Failed to remove document from search results."); } } bool has_conversation_history = !conversation_id.empty(); auto qa_op = ConversationModel::get_answer(docs_array.dump(0), conversation_standalone_query, conversation_model); if(!qa_op.ok()) { return Option<nlohmann::json>(qa_op.code(), qa_op.error()); } result["conversation"]["answer"] = qa_op.get(); if(exclude_fields.count("conversation_history") != 0) { result["conversation"]["conversation_id"] = conversation_id; } auto formatted_question_op = ConversationModel::format_question(raw_query, conversation_model); if(!formatted_question_op.ok()) { return Option<nlohmann::json>(formatted_question_op.code(), formatted_question_op.error()); } auto formatted_answer_op = ConversationModel::format_answer(qa_op.get(), conversation_model); if(!formatted_answer_op.ok()) { return Option<nlohmann::json>(formatted_answer_op.code(), formatted_answer_op.error()); } nlohmann::json conversation_history = nlohmann::json::array(); conversation_history.push_back(formatted_question_op.get()); conversation_history.push_back(formatted_answer_op.get()); auto add_conversation_op = ConversationManager::get_instance().add_conversation(conversation_history, conversation_model, conversation_id); if(!add_conversation_op.ok()) { return Option<nlohmann::json>(add_conversation_op.code(), 
add_conversation_op.error()); } if(exclude_fields.count("conversation_history") == 0) { auto get_conversation_op = ConversationManager::get_instance().get_conversation(add_conversation_op.get()); if(!get_conversation_op.ok()) { return Option<nlohmann::json>(get_conversation_op.code(), get_conversation_op.error()); } result["conversation"]["conversation_history"] = get_conversation_op.get(); result["conversation"]["conversation_history"].erase("id"); } result["conversation"]["conversation_id"] = add_conversation_op.get(); } result["facet_counts"] = nlohmann::json::array(); // populate facets for(facet& a_facet: facets) { // Don't return zero counts for a wildcard facet. if (a_facet.is_wildcard_match && (((a_facet.is_intersected && a_facet.value_result_map.empty())) || (!a_facet.is_intersected && a_facet.result_map.empty()))) { continue; } // check for search cutoff elapse if((std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::system_clock::now(). time_since_epoch()).count() - search_begin_us) > search_stop_us) { search_cutoff = true; break; } nlohmann::json facet_result = nlohmann::json::object(); facet_result["field_name"] = a_facet.field_name; facet_result["sampled"] = a_facet.sampled; facet_result["counts"] = nlohmann::json::array(); std::vector<facet_value_t> facet_values; std::vector<facet_count_t> facet_counts; for (const auto & kv : a_facet.result_map) { facet_count_t v = kv.second; v.fhash = kv.first; v.sort_field_val = kv.second.sort_field_val; facet_counts.emplace_back(v); } for (const auto& kv : a_facet.value_result_map) { facet_count_t v = kv.second; v.fvalue = kv.first; v.fhash = StringUtils::hash_wy(kv.first.c_str(), kv.first.size()); facet_counts.emplace_back(v); } auto max_facets = std::min(max_facet_values, facet_counts.size()); auto nthElement = max_facets == facet_counts.size() ? 
max_facets - 1 : max_facets; std::nth_element(facet_counts.begin(), facet_counts.begin() + nthElement, facet_counts.end(), Collection::facet_count_compare); if(a_facet.is_range_query){ for(const auto& kv : a_facet.result_map){ auto facet_range_iter = a_facet.facet_range_map.find(kv.first); if(facet_range_iter != a_facet.facet_range_map.end()){ auto & facet_count = kv.second; facet_value_t facet_value = {facet_range_iter->second.range_label, std::string(), facet_count.count}; facet_values.emplace_back(facet_value); } else{ LOG (ERROR) << "range_id not found in result map."; } } } else { auto the_field = search_schema.at(a_facet.field_name); bool should_return_parent = std::find(facet_return_parent.begin(), facet_return_parent.end(), the_field.name) != facet_return_parent.end(); bool should_fetch_doc_from_store = ((a_facet.is_intersected && should_return_parent) || !a_facet.is_intersected); for(size_t fi = 0; fi < max_facets; fi++) { // remap facet value hash with actual string auto & facet_count = facet_counts[fi]; std::string value; nlohmann::json document; if(should_fetch_doc_from_store) { const std::string &seq_id_key = get_seq_id_key((uint32_t) facet_count.doc_id); const Option<bool> &document_op = get_document_from_store(seq_id_key, document); if (!document_op.ok()) { LOG(ERROR) << "Facet fetch error. 
" << document_op.error(); continue; } } if(a_facet.is_intersected) { value = facet_count.fvalue; //LOG(INFO) << "used intersection"; } else { // fetch actual facet value from representative doc id //LOG(INFO) << "used hashes"; bool facet_found = facet_value_to_string(a_facet, facet_count, document, value); if(!facet_found) { continue; } } highlight_t highlight; if(!facet_query.query.empty()) { bool use_word_tokenizer = Tokenizer::has_word_tokenizer(the_field.locale); bool normalise = !use_word_tokenizer; std::vector<std::string> fquery_tokens; Tokenizer(facet_query.query, true, false, the_field.locale, symbols_to_index, token_separators, the_field.get_stemmer()).tokenize(fquery_tokens); if(fquery_tokens.empty()) { continue; } std::vector<string>& ftokens = a_facet.is_intersected ? a_facet.fvalue_tokens[facet_count.fvalue] : a_facet.hash_tokens[facet_count.fhash]; tsl::htrie_map<char, token_leaf> qtoken_leaves; //LOG(INFO) << "working on hash_tokens for hash " << kv.first << " with size " << ftokens.size(); for(size_t ti = 0; ti < ftokens.size(); ti++) { if(the_field.is_bool()) { if(ftokens[ti] == "1") { ftokens[ti] = "true"; } else { ftokens[ti] = "false"; } } Tokenizer(facet_query.query, true, false, the_field.locale, symbols_to_index, token_separators, the_field.get_stemmer()).tokenize(ftokens[ti]); const std::string& resolved_token = ftokens[ti]; size_t root_len = (fquery_tokens.size() == ftokens.size()) ? 
fquery_tokens[ti].size() : resolved_token.size(); token_leaf leaf(nullptr, root_len, 0, (ti == ftokens.size()-1)); qtoken_leaves.emplace(resolved_token, leaf); } std::vector<std::string> raw_fquery_tokens; Tokenizer(facet_query.query, normalise, false, the_field.locale, symbols_to_index, token_separators, the_field.get_stemmer()).tokenize(raw_fquery_tokens); if(raw_fquery_tokens.empty()) { continue; } size_t prefix_token_num_chars = StringUtils::get_num_chars(raw_fquery_tokens.back()); StringUtils string_utils; size_t last_valid_offset = 0; int last_valid_offset_index = -1; match_index_t match_index(Match(), 0, 0); uint8_t index_symbols[256] = {}; for(char c: symbols_to_index) { index_symbols[uint8_t(c)] = 1; } handle_highlight_text(value, normalise, the_field, false, symbols_to_index, token_separators, highlight, string_utils, use_word_tokenizer, highlight_affix_num_tokens, qtoken_leaves, last_valid_offset_index, prefix_token_num_chars, false, snippet_threshold, false, ftokens, last_valid_offset, highlight_start_tag, highlight_end_tag, index_symbols, match_index); } nlohmann::json parent; if(the_field.nested && should_return_parent) { parent = get_facet_parent(the_field.name, document, value, the_field.is_array()); } const auto& highlighted_text = highlight.snippets.empty() ? 
value : highlight.snippets[0]; facet_value_t facet_value = {value, highlighted_text, facet_count.count, facet_count.sort_field_val, parent}; facet_values.emplace_back(facet_value); } } if(a_facet.is_sort_by_alpha) { bool is_asc = a_facet.sort_order == "asc"; std::stable_sort(facet_values.begin(), facet_values.end(), [&] (const auto& fv1, const auto& fv2) { if(is_asc) { return fv1.value < fv2.value; } return fv1.value > fv2.value; }); } else if(!a_facet.sort_field.empty()) { bool is_asc = a_facet.sort_order == "asc"; std::stable_sort(facet_values.begin(), facet_values.end(), [&] (const auto& fv1, const auto& fv2) { if(is_asc) { return fv1.sort_field_val < fv2.sort_field_val; } return fv1.sort_field_val > fv2.sort_field_val; }); } else { std::stable_sort(facet_values.begin(), facet_values.end(), Collection::facet_count_str_compare); } for(const auto & facet_count: facet_values) { nlohmann::json facet_value_count = nlohmann::json::object(); const std::string & value = facet_count.value; facet_value_count["value"] = value; facet_value_count["highlighted"] = facet_count.highlighted; facet_value_count["count"] = facet_count.count; if(!facet_count.parent.empty()) { facet_value_count["parent"] = facet_count.parent; } facet_result["counts"].push_back(facet_value_count); } // add facet value stats facet_result["stats"] = nlohmann::json::object(); if(a_facet.stats.fvcount != 0) { facet_result["stats"]["min"] = a_facet.stats.fvmin; facet_result["stats"]["max"] = a_facet.stats.fvmax; facet_result["stats"]["sum"] = a_facet.stats.fvsum; facet_result["stats"]["avg"] = (a_facet.stats.fvsum / a_facet.stats.fvcount); } facet_result["stats"]["total_values"] = facet_counts.size(); result["facet_counts"].push_back(facet_result); } result["search_cutoff"] = search_cutoff; result["request_params"] = nlohmann::json::object(); result["request_params"]["collection_name"] = name; result["request_params"]["per_page"] = per_page; result["request_params"]["q"] = raw_query; 
result["request_params"]["first_q"] = first_q; if(!voice_query.empty()) { result["request_params"]["voice_query"] = nlohmann::json::object(); result["request_params"]["voice_query"]["transcribed_query"] = transcribed_query; } if(!override_metadata.empty()) { result["metadata"] = override_metadata; } //long long int timeMillis = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::high_resolution_clock::now() - begin).count(); //!LOG(INFO) << "Time taken for result calc: " << timeMillis << "us"; //!store->print_memory_usage(); return Option<nlohmann::json>(result); } void Collection::expand_search_query(const string& raw_query, size_t offset, size_t total, const search_args* search_params, const std::vector<std::vector<KV*>>& result_group_kvs, const std::vector<std::string>& raw_search_fields, string& first_q) const { if(!Config::get_instance().get_enable_search_analytics()) { return ; } if(offset == 0 && !raw_search_fields.empty() && !search_params->searched_queries.empty() && total != 0 && !result_group_kvs.empty()) { // we have to map raw_query (which could contain a prefix) back to expanded version auto search_field_it = search_schema.find(raw_search_fields[0]); if(search_field_it == search_schema.end() || Tokenizer::has_word_tokenizer(search_field_it->locale)) { return ; } first_q = ""; auto q_index = result_group_kvs[0][0]->query_index; if(q_index >= search_params->searched_queries.size()) { return ; } const auto& qleaves = search_params->searched_queries[q_index]; Tokenizer tokenizer(raw_query, true, false, search_field_it->locale, symbols_to_index, token_separators, search_field_it->get_stemmer()); std::string raw_token; size_t raw_token_index = 0, tok_start = 0, tok_end = 0; while(tokenizer.next(raw_token, raw_token_index, tok_start, tok_end)) { if(raw_token_index < qleaves.size()) { auto leaf = qleaves[raw_token_index]; std::string tok(reinterpret_cast<char*>(leaf->key), leaf->key_len - 1); if(StringUtils::begins_with(tok, raw_token)) { 
first_q += tok + " "; } } } if(qleaves.size() != raw_token_index+1) { first_q = raw_query; } if(!first_q.empty() && first_q.back() == ' ') { first_q.pop_back(); } } } void Collection::copy_highlight_doc(std::vector<highlight_field_t>& hightlight_items, const bool nested_fields_enabled, const nlohmann::json& src, nlohmann::json& dst) { for(const auto& hightlight_item: hightlight_items) { if(!nested_fields_enabled && src.count(hightlight_item.name) != 0) { dst[hightlight_item.name] = src[hightlight_item.name]; continue; } std::string root_field_name; for(size_t i = 0; i < hightlight_item.name.size(); i++) { if(hightlight_item.name[i] == '.') { break; } root_field_name += hightlight_item.name[i]; } if(dst.count(root_field_name) != 0) { // skip if parent "foo" has already has been copied over in e.g. foo.bar, foo.baz continue; } // root field name might not exist if object has primitive field values with "."s in the name if(src.count(root_field_name) != 0) { // copy whole sub-object dst[root_field_name] = src[root_field_name]; } else if(src.count(hightlight_item.name) != 0) { dst[hightlight_item.name] = src[hightlight_item.name]; } } } void Collection::process_search_field_weights(const std::vector<search_field_t>& search_fields, std::vector<uint32_t>& query_by_weights, std::vector<search_field_t>& weighted_search_fields) const { const bool weights_given = !query_by_weights.empty(); // weights, if given, must be in desc order bool weights_in_desc_order = true; bool weights_under_max = true; for(size_t i = 0; i < search_fields.size(); i++) { if(!weights_given) { size_t weight = std::max<int>(0, (int(Index::FIELD_MAX_WEIGHT) - i)); query_by_weights.push_back(weight); auto wsearch_field = search_fields[i]; wsearch_field.weight = weight; weighted_search_fields.push_back(wsearch_field); } else { // check if weights are already sorted auto prev_weight = (i == 0) ? 
query_by_weights[0] : query_by_weights[i-1]; weights_in_desc_order = weights_in_desc_order && (query_by_weights[i] <= prev_weight); weights_under_max = weights_under_max && (query_by_weights[i] <= Index::FIELD_MAX_WEIGHT); } } if(weights_given && (!weights_in_desc_order || !weights_under_max)) { // ensure that search fields are sorted on their corresponding weight std::vector<std::pair<size_t, size_t>> field_index_and_weights; for(size_t i=0; i < search_fields.size(); i++) { field_index_and_weights.emplace_back(i, search_fields[i].weight); } std::sort(field_index_and_weights.begin(), field_index_and_weights.end(), [](const auto& a, const auto& b) { return a.second > b.second; }); for(size_t i = 0; i < field_index_and_weights.size(); i++) { const auto& index_weight = field_index_and_weights[i]; // we have to also normalize weights to 0 to Index::FIELD_MAX_WEIGHT range. if(i == 0) { query_by_weights[i] = Index::FIELD_MAX_WEIGHT; } else { auto curr_weight = field_index_and_weights[i].second; auto prev_weight = field_index_and_weights[i-1].second; if(curr_weight == prev_weight) { query_by_weights[i] = query_by_weights[i-1]; } else { // bound to be lesser than prev_weight since weights have been sorted desc uint32_t bounded_weight = std::max(0, int(query_by_weights[i-1]) - 1); query_by_weights[i] = bounded_weight; } } const auto& search_field = search_fields[index_weight.first]; const auto weight = query_by_weights[i]; const size_t orig_index = index_weight.first; auto wsearch_field = search_fields[orig_index]; wsearch_field.weight = weight; weighted_search_fields.push_back(wsearch_field); } } if(weighted_search_fields.empty()) { for(size_t i=0; i < search_fields.size(); i++) { weighted_search_fields.push_back(search_fields[i]); } } } // lsb_offset is zero-based and inclusive uint64_t Collection::extract_bits(uint64_t value, unsigned lsb_offset, unsigned n) { const uint64_t max_n = CHAR_BIT * sizeof(uint64_t); if (lsb_offset >= max_n) { return 0; } value >>= lsb_offset; 
if (n >= max_n) {
    // shifting by >= the type width is UB; the shifted value already holds all remaining bits
    return value;
}
const uint64_t mask = ((uint64_t(1)) << n) - 1; /* n '1's */
return value & mask; }

// Unpacks the 64-bit composite `match_score` into its component fields and writes
// them into `info`. The bit layout depends on the text match type:
//
// MAX_SCORE
// [ sign | tokens_matched | max_field_score | max_field_weight | num_matching_fields ]
// [  1   |       4        |       48        |        8         |          3          ] (64 bits)
//
// MAX_WEIGHT
// [ sign | tokens_matched | max_field_weight | max_field_score | num_matching_fields ]
// [  1   |       4        |        8         |       48        |          3          ] (64 bits)
void Collection::populate_text_match_info(nlohmann::json& info, uint64_t match_score,
                                          const text_match_type_t match_type,
                                          const size_t total_tokens) const {
    // tokens_matched sits in the 4 bits just below the sign bit in both layouts
    auto tokens_matched = extract_bits(match_score, 59, 4);

    // serialized as a string since the raw score uses the full 64-bit range
    info["score"] = std::to_string(match_score);
    info["tokens_matched"] = tokens_matched;
    info["fields_matched"] = extract_bits(match_score, 0, 3);

    if(match_type == max_score) {
        info["best_field_score"] = std::to_string(extract_bits(match_score, 11, 48));
        info["best_field_weight"] = extract_bits(match_score, 3, 8);
        info["num_tokens_dropped"] = total_tokens - tokens_matched;
        // the stored byte is inverted; 255 - x restores the typo/prefix score
        info["typo_prefix_score"] = 255 - extract_bits(match_score, 35, 8);
    } else {
        info["best_field_weight"] = extract_bits(match_score, 51, 8);
        info["best_field_score"] = std::to_string(extract_bits(match_score, 3, 48));
        info["num_tokens_dropped"] = total_tokens - tokens_matched;
        info["typo_prefix_score"] = 255 - extract_bits(match_score, 27, 8);
    }
}

// Builds `highlight_items` (the fields to be highlighted) from the searched fields,
// any explicitly requested highlight field names, and the include/exclude lists,
// then maps query tokens to their matching index leaves for snippet generation.
void Collection::process_highlight_fields(const std::vector<search_field_t>& search_fields,
                                          const std::vector<std::string>& raw_search_fields,
                                          const tsl::htrie_set<char>& include_fields,
                                          const tsl::htrie_set<char>& exclude_fields,
                                          const std::vector<std::string>& highlight_field_names,
                                          const std::vector<std::string>& highlight_full_field_names,
                                          const std::vector<enable_t>& infixes,
                                          std::vector<std::string>& q_tokens,
                                          const tsl::htrie_map<char, token_leaf>& qtoken_set,
                                          std::vector<highlight_field_t>& highlight_items) const {

    // identify full highlight fields
    spp::sparse_hash_set<std::string> fields_highlighted_fully_set;
    std::vector<std::string>
fields_highlighted_fully_expanded; for(const std::string& highlight_full_field: highlight_full_field_names) { extract_field_name(highlight_full_field, search_schema, fields_highlighted_fully_expanded, true, enable_nested_fields); } for(std::string & highlight_full_field: fields_highlighted_fully_expanded) { fields_highlighted_fully_set.insert(highlight_full_field); } // identify infix enabled fields spp::sparse_hash_set<std::string> fields_infixed_set; for(size_t i = 0; i < search_fields.size(); i++) { const auto& field_name = search_fields[i].name; enable_t field_infix = search_fields[i].infix; if(field_infix != off) { fields_infixed_set.insert(field_name); } } if(highlight_field_names.empty()) { std::vector<std::string> highlight_field_names_expanded; for(size_t i = 0; i < raw_search_fields.size(); i++) { extract_field_name(raw_search_fields[i], search_schema, highlight_field_names_expanded, false, enable_nested_fields); } for(size_t i = 0; i < highlight_field_names_expanded.size(); i++) { const auto& field_name = highlight_field_names_expanded[i]; if(exclude_fields.count(field_name) != 0) { // should not pick excluded field for highlighting (only for implicit highlighting) continue; } if(!include_fields.empty() && include_fields.count(field_name) == 0) { // if include fields have been specified, use that as allow list continue; } bool fully_highlighted = (fields_highlighted_fully_set.count(field_name) != 0); bool infixed = (fields_infixed_set.count(field_name) != 0); auto schema_it = search_schema.find(field_name); bool is_string = (schema_it != search_schema.end()) && schema_it->is_string(); highlight_items.emplace_back(field_name, fully_highlighted, infixed, is_string); } } else { std::vector<std::string> highlight_field_names_expanded; for(size_t i = 0; i < highlight_field_names.size(); i++) { extract_field_name(highlight_field_names[i], search_schema, highlight_field_names_expanded, false, enable_nested_fields); } for(size_t i = 0; i < 
highlight_field_names_expanded.size(); i++) { const auto& highlight_field_name = highlight_field_names_expanded[i]; auto schema_it = search_schema.find(highlight_field_name); if(schema_it == search_schema.end()) { // ignore fields not part of schema continue; } bool fully_highlighted = (fields_highlighted_fully_set.count(highlight_field_name) != 0); bool infixed = (fields_infixed_set.count(highlight_field_name) != 0); bool is_string = schema_it->is_string(); highlight_items.emplace_back(highlight_field_name, fully_highlighted, infixed, is_string); } } std::string qtoken; for(auto it = qtoken_set.begin(); it != qtoken_set.end(); ++it) { it.key(qtoken); for(auto& highlight_item: highlight_items) { if(!highlight_item.is_string) { continue; } const auto& field_name = highlight_item.name; art_leaf* leaf = index->get_token_leaf(field_name, (const unsigned char*) qtoken.c_str(), qtoken.size()+1); if(leaf) { highlight_item.qtoken_leaves.insert(qtoken, token_leaf(leaf, it.value().root_len, it.value().num_typos, it.value().is_prefix) ); } } } // We will also add tokens from the query if they are not already added. // This helps handle highlighting of tokens which were dropped from the query to return results. 
for(auto& q_token: q_tokens) {
    if(qtoken_set.find(q_token) == qtoken_set.end()) {
        for(auto& highlight_item: highlight_items) {
            if(!highlight_item.is_string) {
                // only string fields are highlighted
                continue;
            }
            const auto& field_name = highlight_item.name;
            // key length includes the terminating null byte
            art_leaf* leaf = index->get_token_leaf(field_name, (const unsigned char*) q_token.c_str(),
                                                   q_token.size()+1);
            if(leaf) {
                // tokens added this way carry zero typos and are treated as non-prefix matches
                highlight_item.qtoken_leaves.insert(q_token, token_leaf(leaf, q_token.size(), 0, false));
            }
        }
    }
}
}

// Processes override rules that carry filters: delegates dynamic filter matching to
// the index, then applies the include (pin) / exclude (hide) hits attached to the
// matched dynamic overrides, translating their doc ids to sequence ids.
void Collection::process_filter_overrides(std::vector<const override_t*>& filter_overrides,
                                          std::vector<std::string>& q_include_tokens,
                                          token_ordering token_order,
                                          filter_node_t*& filter_tree_root,
                                          std::vector<std::pair<uint32_t, uint32_t>>& included_ids,
                                          std::vector<uint32_t>& excluded_ids,
                                          nlohmann::json& override_metadata,
                                          bool enable_typos_for_numerical_tokens,
                                          bool enable_typos_for_alpha_numerical_tokens) const {
    std::vector<const override_t*> matched_dynamic_overrides;
    index->process_filter_overrides(filter_overrides, q_include_tokens, token_order, filter_tree_root,
                                    matched_dynamic_overrides, override_metadata,
                                    enable_typos_for_numerical_tokens,
                                    enable_typos_for_alpha_numerical_tokens);

    // we will check the dynamic overrides to see if they also have include/exclude
    std::set<uint32_t> excluded_set;
    for(auto matched_dynamic_override: matched_dynamic_overrides) {
        for(const auto& hit: matched_dynamic_override->drop_hits) {
            Option<uint32_t> seq_id_op = doc_id_to_seq_id(hit.doc_id);
            if(seq_id_op.ok()) {
                excluded_ids.push_back(seq_id_op.get());
                excluded_set.insert(seq_id_op.get());
            }
        }

        for(const auto& hit: matched_dynamic_override->add_hits) {
            Option<uint32_t> seq_id_op = doc_id_to_seq_id(hit.doc_id);
            if(!seq_id_op.ok()) {
                // skip hits whose doc id cannot be resolved to a sequence id
                continue;
            }

            uint32_t seq_id = seq_id_op.get();
            // an exclusion of the same doc takes precedence over its inclusion
            bool excluded = (excluded_set.count(seq_id) != 0);
            if(!excluded) {
                included_ids.emplace_back(seq_id, hit.position);
            }
        }
    }
}

// Splits raw query tokens into include / exclude / phrase groups, honouring the
// `-` (exclusion) and `"` (phrase) query operators.
void Collection::process_tokens(std::vector<std::string>& tokens, std::vector<std::string>& q_include_tokens,
                                std::vector<std::vector<std::string>>&
q_exclude_tokens, std::vector<std::vector<std::string>>& q_phrases, bool& exclude_operator_prior, bool& phrase_search_op_prior, std::vector<std::string>& phrase, const std::string& stopwords_set, const bool& already_segmented, const std::string& locale, std::shared_ptr<Stemmer> stemmer) const{ auto symbols_to_index_has_minus = std::find(symbols_to_index.begin(), symbols_to_index.end(), '-') != symbols_to_index.end(); for(auto& token: tokens) { bool end_of_phrase = false; if(token == "-" && !symbols_to_index_has_minus) { if(locale != "en" && !locale.empty()) { // non-English locale parsing splits "-" as individual tokens exclude_operator_prior = true; } continue; } else if(token[0] == '-' && !symbols_to_index_has_minus) { exclude_operator_prior = true; token = token.substr(1); } if(token[0] == '"' && token.size() > 1) { phrase_search_op_prior = true; token = token.substr(1); } if(!token.empty() && (token.back() == '"' || (token[0] == '"' && token.size() == 1))) { if(phrase_search_op_prior) { // handles single token phrase and a phrase with padded space, like: "some query " end_of_phrase = true; token = token.substr(0, token.size()-1); } else if(token[0] == '"' && token.size() == 1) { // handles front padded phrase query, e.g. 
" some query" phrase_search_op_prior = true; } } std::vector<std::string> sub_tokens; if(already_segmented) { StringUtils::split(token, sub_tokens, " "); } else { Tokenizer(token, true, false, locale, symbols_to_index, token_separators, stemmer).tokenize(sub_tokens); } for(auto& sub_token: sub_tokens) { if(sub_token.size() > 100) { sub_token.erase(100); } if(exclude_operator_prior) { if(phrase_search_op_prior) { phrase.push_back(sub_token); } else { q_exclude_tokens.push_back({sub_token}); exclude_operator_prior = false; } } else if(phrase_search_op_prior) { phrase.push_back(sub_token); } else { q_include_tokens.push_back(sub_token); } } if(end_of_phrase && phrase_search_op_prior) { if(exclude_operator_prior) { q_exclude_tokens.push_back(phrase); } else { q_phrases.push_back(phrase); } phrase_search_op_prior = false; exclude_operator_prior = false; phrase.clear(); } } if(!phrase.empty()) { if(exclude_operator_prior) { q_exclude_tokens.push_back(phrase); } else { q_include_tokens.insert(q_include_tokens.end(), phrase.begin(), phrase.end()); } } if(q_include_tokens.empty()) { if(!stopwords_set.empty()) { //this can happen when all tokens in the include are stopwords q_include_tokens.emplace_back("##hrhdh##"); } else { // this can happen if the only query token is an exclusion token q_include_tokens.emplace_back("*"); } } } void Collection::parse_search_query(const std::string &query, std::vector<std::string>& q_include_tokens, std::vector<std::string>& q_unstemmed_tokens, std::vector<std::vector<std::string>>& q_exclude_tokens, std::vector<std::vector<std::string>>& q_phrases, const std::string& locale, const bool already_segmented, const std::string& stopwords_set, std::shared_ptr<Stemmer> stemmer) const { if(query == "*") { q_exclude_tokens = {}; q_include_tokens = {query}; } else { std::vector<std::string> tokens; std::vector<std::string> tokens_non_stemmed; stopword_struct_t stopwordStruct; if(!stopwords_set.empty()) { const auto &stopword_op = 
StopwordsManager::get_instance().get_stopword(stopwords_set, stopwordStruct); if (!stopword_op.ok()) { LOG(ERROR) << stopword_op.error(); LOG(ERROR) << "Error fetching stopword_list for stopword " << stopwords_set; } } if(already_segmented) { StringUtils::split(query, tokens, " "); } else { std::vector<char> custom_symbols = symbols_to_index; custom_symbols.push_back('-'); custom_symbols.push_back('"'); Tokenizer(query, true, false, locale, custom_symbols, token_separators, stemmer).tokenize(tokens); if(stemmer) { Tokenizer(query, true, false, locale, custom_symbols, token_separators, nullptr).tokenize(tokens_non_stemmed); } } for (const auto val: stopwordStruct.stopwords) { tokens.erase(std::remove(tokens.begin(), tokens.end(), val), tokens.end()); tokens_non_stemmed.erase(std::remove(tokens_non_stemmed.begin(), tokens_non_stemmed.end(), val), tokens_non_stemmed.end()); } bool exclude_operator_prior = false; bool phrase_search_op_prior = false; std::vector<std::string> phrase; process_tokens(tokens, q_include_tokens, q_exclude_tokens, q_phrases, exclude_operator_prior, phrase_search_op_prior, phrase, stopwords_set, already_segmented, locale, stemmer); if(stemmer) { exclude_operator_prior = false; phrase_search_op_prior = false; phrase.clear(); // those are unused std::vector<std::vector<std::string>> q_exclude_tokens_dummy; std::vector<std::vector<std::string>> q_phrases_dummy; process_tokens(tokens_non_stemmed, q_unstemmed_tokens, q_exclude_tokens_dummy, q_phrases_dummy, exclude_operator_prior, phrase_search_op_prior, phrase, stopwords_set, already_segmented, locale, nullptr); } } } void Collection::populate_result_kvs(Topster *topster, std::vector<std::vector<KV *>> &result_kvs, const spp::sparse_hash_map<uint64_t, uint32_t>& groups_processed, const std::vector<sort_by>& sort_by_fields) { if(topster->distinct) { // we have to pick top-K groups Topster gtopster(topster->MAX_SIZE); int group_count_index = -1; int group_sort_order = 1; for(int i = 0; i < 
sort_by_fields.size(); ++i) {
    // locate an explicit sort on the per-group found count, if any
    if(sort_by_fields[i].name == sort_field_const::group_found) {
        group_count_index = i;
        if(sort_by_fields[i].order == sort_field_const::asc) {
            // negate the count so the (descending) topster yields ascending order
            group_sort_order *= -1;
        }
        break;
    }
}

// pick the head KV of each group and rank the groups in a second topster
for(auto& group_topster: topster->group_kv_map) {
    group_topster.second->sort();
    if(group_topster.second->size != 0) {
        KV* kv_head = group_topster.second->getKV(0);
        if(group_count_index >= 0) {
            const auto& itr = groups_processed.find(kv_head->distinct_key);
            if(itr != groups_processed.end()) {
                // score the group head by its group size for group_found sorting
                kv_head->scores[group_count_index] = itr->second * group_sort_order;
            }
        }
        gtopster.add(kv_head);
    }
}

gtopster.sort();

// emit each selected group's full KV list, in ranked group order
for(size_t i = 0; i < gtopster.size; i++) {
    KV* kv = gtopster.getKV(i);
    const std::vector<KV*> group_kvs(
        topster->group_kv_map[kv->distinct_key]->kvs,
        topster->group_kv_map[kv->distinct_key]->kvs+topster->group_kv_map[kv->distinct_key]->size
    );
    result_kvs.emplace_back(group_kvs);
}
} else {
    // non-grouped search: one single-element KV list per hit
    for(uint32_t t = 0; t < topster->size; t++) {
        KV* kv = topster->getKV(t);
        result_kvs.push_back({kv});
    }
}
}

// Parses `filter_query` and returns (via `filter_result`) the sequence ids of all
// documents matching it. Takes a shared (read) lock on the collection.
Option<bool> Collection::get_filter_ids(const std::string& filter_query, filter_result_t& filter_result,
                                        const bool& should_timeout) const {
    std::shared_lock lock(mutex);

    const std::string doc_id_prefix = std::to_string(collection_id) + "_" + DOC_ID_PREFIX + "_";
    filter_node_t* filter_tree_root = nullptr;
    Option<bool> filter_op = filter::parse_filter_query(filter_query, search_schema,
                                                        store, doc_id_prefix, filter_tree_root);
    // guard frees the parsed filter tree on every return path
    std::unique_ptr<filter_node_t> filter_tree_root_guard(filter_tree_root);

    if(!filter_op.ok()) {
        return filter_op;
    }

    return index->do_filtering_with_lock(filter_tree_root, filter_result, name, should_timeout);
}

// Thin delegate: collects the ids referenced by `ref_field_name` for `seq_id`.
Option<bool> Collection::get_related_ids(const std::string& ref_field_name, const uint32_t& seq_id,
                                         std::vector<uint32_t>& result) const {
    return index->get_related_ids(ref_field_name, seq_id, result);
}

Option<bool> Collection::get_object_array_related_id(const std::string& ref_field_name,
                                                     const uint32_t& seq_id, const uint32_t&
object_index, uint32_t& result) const { return index->get_object_array_related_id(name, ref_field_name, seq_id, object_index, result); }

// Parses `filter_query` against this collection's schema and collects the
// matching seq ids into `filter_result`, for resolving a reference made via
// `reference_field_name`. The actual filtering is delegated to the index.
Option<bool> Collection::get_reference_filter_ids(const std::string & filter_query,
                                                  filter_result_t& filter_result,
                                                  const std::string& reference_field_name) const {
    std::shared_lock lock(mutex);

    const std::string doc_id_prefix = std::to_string(collection_id) + "_" + DOC_ID_PREFIX + "_";
    filter_node_t* filter_tree_root = nullptr;
    Option<bool> parse_op = filter::parse_filter_query(filter_query, search_schema, store, doc_id_prefix,
                                                       filter_tree_root);
    // RAII guard: frees the parsed filter tree on every return path below.
    std::unique_ptr<filter_node_t> filter_tree_root_guard(filter_tree_root);

    if(!parse_op.ok()) {
        return parse_op;
    }

    return index->do_reference_filtering_with_lock(filter_tree_root, filter_result, name, reference_field_name);
}

// Stringifies the facet value of `document[a_facet.field_name]` (for array
// fields, the element at `facet_count.array_pos`) into `value`.
// Returns false when the field is absent, the array position is out of
// bounds, or the stored value cannot be coerced to the schema type.
bool Collection::facet_value_to_string(const facet &a_facet, const facet_count_t &facet_count,
                                       nlohmann::json &document, std::string &value) const {

    if(document.count(a_facet.field_name) == 0) {
        // check for field existence: an optional field may legitimately be absent
        if(search_schema.at(a_facet.field_name).optional) {
            return false;
        }

        LOG(ERROR) << "Could not find field " << a_facet.field_name << " in document during faceting.";
        LOG(ERROR) << "Facet field type: " << search_schema.at(a_facet.field_name).type;
        LOG(ERROR) << "Actual document: " << document;
        return false;
    }

    if(search_schema.at(a_facet.field_name).is_array()) {
        size_t array_sz = document[a_facet.field_name].size();
        if(facet_count.array_pos >= array_sz) {
            LOG(ERROR) << "Facet field array size " << array_sz
                       << " lesser than array pos " << facet_count.array_pos
                       << " for facet field " << a_facet.field_name;
            LOG(ERROR) << "Facet field type: " << search_schema.at(a_facet.field_name).type;
            LOG(ERROR) << "Actual document: " << document;
            return false;
        }
    }

    // coerce the stored value to the schema type before formatting it
    auto coerce_op = validator_t::coerce_element(search_schema.at(a_facet.field_name), document,
                                                 document[a_facet.field_name],
                                                 fallback_field_type, DIRTY_VALUES::COERCE_OR_REJECT);

    if(!coerce_op.ok()) {
        LOG(ERROR) << "Bad type for field " << a_facet.field_name << ", document: " << document;
        return false;
    }

    // per-type dispatch: scalar fields read the value directly, array fields
    // read the element at `facet_count.array_pos`
    if(search_schema.at(a_facet.field_name).type == field_types::STRING) {
        value = document[a_facet.field_name];
    } else if(search_schema.at(a_facet.field_name).type == field_types::STRING_ARRAY) {
        value = document[a_facet.field_name][facet_count.array_pos];
    } else if(search_schema.at(a_facet.field_name).type == field_types::INT32) {
        int32_t raw_val = document[a_facet.field_name].get<int32_t>();
        value = std::to_string(raw_val);
    } else if(search_schema.at(a_facet.field_name).type == field_types::INT32_ARRAY) {
        int32_t raw_val = document[a_facet.field_name][facet_count.array_pos].get<int32_t>();
        value = std::to_string(raw_val);
    } else if(search_schema.at(a_facet.field_name).type == field_types::INT64) {
        int64_t raw_val = document[a_facet.field_name].get<int64_t>();
        value = std::to_string(raw_val);
    } else if(search_schema.at(a_facet.field_name).type == field_types::INT64_ARRAY) {
        int64_t raw_val = document[a_facet.field_name][facet_count.array_pos].get<int64_t>();
        value = std::to_string(raw_val);
    } else if(search_schema.at(a_facet.field_name).type == field_types::FLOAT) {
        float raw_val = document[a_facet.field_name].get<float>();
        value = StringUtils::float_to_str(raw_val);
    } else if(search_schema.at(a_facet.field_name).type == field_types::FLOAT_ARRAY) {
        float raw_val = document[a_facet.field_name][facet_count.array_pos].get<float>();
        value = StringUtils::float_to_str(raw_val);
    } else if(search_schema.at(a_facet.field_name).type == field_types::BOOL) {
        // std::to_string(bool) yields "1"/"0"; map to JSON-style literals
        value = std::to_string(document[a_facet.field_name].get<bool>());
        value = (value == "1") ? "true" : "false";
    } else if(search_schema.at(a_facet.field_name).type == field_types::BOOL_ARRAY) {
        value = std::to_string(document[a_facet.field_name][facet_count.array_pos].get<bool>());
        value = (value == "1") ? "true" : "false";
    }

    return true;
}

// Recursively descends `field_path` (starting at `field_index`) below `child`
// and returns the immediate parent object whose leaf value equals `val`.
// Returns an empty json value when no match is found.
nlohmann::json Collection::get_parent_object(const nlohmann::json& parent, const nlohmann::json& child,
                                             const std::vector<std::string>& field_path,
                                             size_t field_index, const std::string& val) {
    if(field_index == field_path.size()) {
        // reached the leaf: compare its stringified value against `val`
        std::string str_val;

        if(child.is_string()) {
            str_val = child.get<std::string>();
        } else if(child.is_number_integer()) {
            str_val = std::to_string(child.get<int>());
        } else if(child.is_number_float()) {
            str_val = std::to_string(child.get<float>());
        } else if(child.is_boolean()) {
            str_val = std::to_string(child.get<bool>());
        }

        if(str_val == val) {
            return parent;
        }

        if(child.is_array()) {
            // leaf may be an array of strings: match any element
            for(const auto& ele: child) {
                if(ele.is_string() && ele == val) {
                    return parent;
                }
            }
        }

        return nlohmann::json();
    }

    const auto& fname = field_path[field_index];

    // intermediate must be either an object or an array of objects
    if(child.is_object() && child.contains(fname)) {
        return get_parent_object(child, child[fname], field_path, field_index+1, val);
    } else if(child.is_array()) {
        nlohmann::json doc;
        for(const auto& ele: child) {
            doc = get_parent_object(ele, ele, field_path, field_index, val);
            if(!doc.empty()) {
                return doc;
            }
        }
    }

    return nlohmann::json();
}

// Returns the closest parent object of the (possibly nested) facet value `val`
// within `document`; falls back to the whole document when the root path
// component is missing.
nlohmann::json Collection::get_facet_parent(const std::string& facet_field_name, const nlohmann::json& document,
                                            const std::string& val, bool is_array) const {
    std::vector<std::string> field_path;
    StringUtils::split(facet_field_name, field_path, ".");

    if(!document.contains(field_path[0])) {
        return document;
    }

    return get_parent_object(document, document[field_path[0]], field_path, 1, val);
}

// Walks `path_parts` inside `obj` and reports whether any level along the
// path is an array whose first element is an object.
bool Collection::is_nested_array(const nlohmann::json& obj, std::vector<std::string> path_parts, size_t part_i) const {
    auto child_it = obj.find(path_parts[part_i]);
    if(child_it == obj.end()) {
        return false;
    }

    if(child_it.value().is_array() && !child_it.value().empty() && child_it.value().at(0).is_object()) {
        return true;
    }

    if(part_i+1 == path_parts.size()) {
        return false;
    }

    return
is_nested_array(child_it.value(), path_parts, part_i+1); }

// Computes snippet (and optionally full-value) highlights for `search_field`
// of `document`. For nested fields the results are written directly into
// `highlight_doc` via highlight_nested_field(); for flat string fields the
// results are recorded in `highlight`. `found_highlight` and
// `found_full_highlight` are OR-ed with the outcome.
void Collection::highlight_result(const std::string& raw_query, const field &search_field,
                                  const size_t search_field_index,
                                  const tsl::htrie_map<char, token_leaf>& qtoken_leaves,
                                  const KV* field_order_kv, const nlohmann::json & document,
                                  nlohmann::json& highlight_doc,
                                  StringUtils & string_utils,
                                  const size_t snippet_threshold,
                                  const size_t highlight_affix_num_tokens,
                                  bool highlight_fully,
                                  bool is_infix_search,
                                  const std::string& highlight_start_tag,
                                  const std::string& highlight_end_tag,
                                  const uint8_t* index_symbols,
                                  highlight_t& highlight,
                                  bool& found_highlight,
                                  bool& found_full_highlight) const {

    if(raw_query == "*") {
        // wildcard query: nothing to highlight
        return;
    }

    tsl::htrie_set<char> matched_tokens;

    // locales with a word tokenizer (transliteration) skip normalisation here
    bool use_word_tokenizer = Tokenizer::has_word_tokenizer(search_field.locale);
    bool normalise = !use_word_tokenizer;

    std::vector<std::string> raw_query_tokens;
    Tokenizer(raw_query, normalise, false, search_field.locale, symbols_to_index,
              token_separators, search_field.get_stemmer()).tokenize(raw_query_tokens);

    if(raw_query_tokens.empty()) {
        return ;
    }

    bool flat_field = highlight_doc.contains(search_field.name);
    std::vector<std::string> path_parts;

    if(enable_nested_fields && !flat_field) {
        // nested field: split "a.b.c" into path components for traversal
        StringUtils::split(search_field.name, path_parts, ".");
    } else {
        path_parts = {search_field.name};
    }

    // the last query token may be a prefix; its char length bounds partial highlighting
    const std::string& last_raw_q_token = raw_query_tokens.back();
    size_t prefix_token_num_chars = StringUtils::get_num_chars(last_raw_q_token);

    std::set<std::string> last_full_q_tokens;
    std::vector<match_index_t> match_indices;

    if(is_infix_search) {
        // could be an optional field
        if(document.contains(search_field.name)) {
            size_t array_len = 1;
            bool field_is_array = document[search_field.name].is_array();
            if(field_is_array) {
                array_len = document[search_field.name].size();
            }

            const std::vector<token_positions_t> empty_offsets;
            for(size_t i = 0; i < array_len; i++) {
                std::string text = field_is_array ? document[search_field.name][i] : document[search_field.name];
                StringUtils::tolowercase(text);
                // cheap substring check, attempted for short texts only
                if(text.size() < 100 && text.find(raw_query_tokens.front()) != std::string::npos) {
                    const Match & this_match = Match(field_order_kv->key, empty_offsets, false, false);
                    uint64_t this_match_score = this_match.get_match_score(0, 1);
                    match_indices.emplace_back(this_match, this_match_score, i);
                }
            }
        }
    }

    if(!is_infix_search || match_indices.empty()) {
        /*std::string qtok_buff;
        for(auto it = qtoken_leaves.begin(); it != qtoken_leaves.end(); ++it) {
            it.key(qtok_buff);
            LOG(INFO) << "Token: " << qtok_buff << ", root_len: " << it.value().root_len;
        }*/

        if(!qtoken_leaves.empty()) {
            std::vector<void*> posting_lists;
            for(auto token_leaf: qtoken_leaves) {
                posting_lists.push_back(token_leaf.leaf->values);
            }

            // gather the indexed token positions per array element for this doc
            std::map<size_t, std::vector<token_positions_t>> array_token_positions;
            posting_t::get_array_token_positions(field_order_kv->key, posting_lists, array_token_positions);

            for(const auto& kv: array_token_positions) {
                const std::vector<token_positions_t>& token_positions = kv.second;
                size_t array_index = kv.first;

                if(token_positions.empty()) {
                    continue;
                }

                const Match & this_match = Match(field_order_kv->key, token_positions, true, true);
                uint64_t this_match_score = this_match.get_match_score(1, token_positions.size());
                match_indices.emplace_back(this_match, this_match_score, array_index);

                /*LOG(INFO) << "doc_id: " << document["id"] << ", search_field: " << search_field.name
                          << ", words_present: " << size_t(this_match.words_present)
                          << ", match_score: " << this_match_score
                          << ", match.distance: " << size_t(this_match.distance);*/
            }
        }
    }

    // keep only the best MAX_ARRAY_MATCHES matches, ordered best-first
    const size_t max_array_matches = std::min((size_t)MAX_ARRAY_MATCHES, match_indices.size());
    std::partial_sort(match_indices.begin(), match_indices.begin()+max_array_matches, match_indices.end());

    highlight_nested_field(highlight_doc, highlight_doc, path_parts, 0, false, -1,
                           [&](nlohmann::json& h_obj, bool is_arr_obj_ele, int array_i) {
        if(h_obj.is_object()) {
            return ;
        } else if(!h_obj.is_string()) {
            // non-string leaf: echo the value as the snippet, no matched tokens
            auto val_back = h_obj;
            h_obj = nlohmann::json::object();
            h_obj["snippet"] = to_string(val_back);
            h_obj["matched_tokens"] = nlohmann::json::array();
            if(highlight_fully) {
                h_obj["value"] = val_back;
            }
            return ;
        }

        int matched_index = -1;

        if(!is_arr_obj_ele) {
            // Since we will iterate on both matching and non-matching array elements for highlighting,
            // we need to check if `array_i` exists within match_indices vec.
            for (size_t match_index = 0; match_index < match_indices.size(); match_index++) {
                if (match_indices[match_index].index == array_i) {
                    matched_index = match_index;
                    break;
                }
            }

            if(matched_index == -1) {
                // If an element does not belong to an array of object field and also does not have a matching
                // index we know that there cannot be any matching tokens for highlighting
                std::string text = h_obj.get<std::string>();
                h_obj = nlohmann::json::object();
                h_obj["snippet"] = text;
                h_obj["matched_tokens"] = nlohmann::json::array();
                if(highlight_fully) {
                    h_obj["value"] = text;
                }
                return ;
            }

            std::sort(match_indices[matched_index].match.offsets.begin(),
                      match_indices[matched_index].match.offsets.end());
        } else {
            // array of object element indices will not match indexed offsets, so we will use dummy match;
            // the highlighting logic will ignore this and try to do exhaustive highlighting (look at all tokens)
            match_indices.clear();
            match_indices.push_back(match_index_t(Match(), 0, 0));
            matched_index = 0;
        }

        const auto& match_index = match_indices[matched_index];

        // locate the last offset before the MAX_DISPLACEMENT sentinel
        size_t last_valid_offset = 0;
        int last_valid_offset_index = -1;

        for(size_t match_offset_i = 0; match_offset_i < match_index.match.offsets.size(); match_offset_i++) {
            const auto& token_offset = match_index.match.offsets[match_offset_i];
            if(token_offset.offset != MAX_DISPLACEMENT) {
                last_valid_offset = token_offset.offset;
                last_valid_offset_index = match_offset_i;
            } else {
                break;
            }
        }

        // work on a copy so each array element gets independent snippet state
        highlight_t array_highlight = highlight;
        std::string text = h_obj.get<std::string>();
        h_obj = nlohmann::json::object();

        handle_highlight_text(text, normalise, search_field, is_arr_obj_ele, symbols_to_index, token_separators,
                              array_highlight, string_utils, use_word_tokenizer, highlight_affix_num_tokens,
                              qtoken_leaves, last_valid_offset_index, prefix_token_num_chars, highlight_fully,
                              snippet_threshold, is_infix_search, raw_query_tokens, last_valid_offset,
                              highlight_start_tag, highlight_end_tag, index_symbols, match_index);

        if(array_highlight.snippets.empty() && array_highlight.values.empty()) {
            // nothing was highlighted: echo the original text back
            h_obj["snippet"] = text;
            h_obj["matched_tokens"] = nlohmann::json::array();
        }

        if(!array_highlight.snippets.empty()) {
            found_highlight = found_highlight || true;
            h_obj["snippet"] = array_highlight.snippets[0];
            h_obj["matched_tokens"] = nlohmann::json::array();

            for(auto& token_vec: array_highlight.matched_tokens) {
                for(auto& token: token_vec) {
                    h_obj["matched_tokens"].push_back(token);
                }
            }
        }

        if(!array_highlight.values.empty()) {
            h_obj["value"] = array_highlight.values[0];;
            found_full_highlight = found_full_highlight || true;
        } else if(highlight_fully) {
            h_obj["value"] = text;
        }
    });

    if(!flat_field) {
        // nested fields were fully handled by the lambda above
        return;
    }

    if(!search_field.is_string()) {
        return ;
    }

    if(!is_infix_search && qtoken_leaves.empty()) {
        // none of the tokens from the query were found on this field
        return ;
    }

    if(match_indices.empty()) {
        return ;
    }

    // flat string / string[] field: highlight the top matching array elements
    for(size_t array_i = 0; array_i < max_array_matches; array_i++) {
        std::sort(match_indices[array_i].match.offsets.begin(), match_indices[array_i].match.offsets.end());
        const auto& match_index = match_indices[array_i];
        const Match& match = match_index.match;

        size_t last_valid_offset = 0;
        int last_valid_offset_index = -1;

        for(size_t match_offset_index = 0; match_offset_index < match.offsets.size(); match_offset_index++) {
            const auto& token_offset = match.offsets[match_offset_index];
            if(token_offset.offset != MAX_DISPLACEMENT) {
                last_valid_offset = token_offset.offset;
                last_valid_offset_index = match_offset_index;
            } else {
                break;
            }
        }

        if(!document.contains(search_field.name)) {
            // could be an optional field
            continue;
        }

        /*LOG(INFO) << "field: " << document[search_field.name] << ", id: " << field_order_kv->key
                  << ", index: " << match_index.index;*/

        std::string text;

        if(search_field.type == field_types::STRING) {
            text = document[search_field.name];
        } else {
            // since we try to do manual prefix matching on the first array value,
            // we have to check for an empty array
            if(!document[search_field.name].is_array() || match_index.index >= document[search_field.name].size()) {
                continue;
            }

            text = document[search_field.name][match_index.index];
        }

        handle_highlight_text(text, normalise, search_field, false, symbols_to_index, token_separators,
                              highlight, string_utils, use_word_tokenizer, highlight_affix_num_tokens,
                              qtoken_leaves, last_valid_offset_index, prefix_token_num_chars, highlight_fully,
                              snippet_threshold, is_infix_search, raw_query_tokens, last_valid_offset,
                              highlight_start_tag, highlight_end_tag, index_symbols, match_index);

        if(!highlight.snippets.empty()) {
            found_highlight = found_highlight || true;

            for(auto& token_vec: highlight.matched_tokens) {
                for(auto& token: token_vec) {
                    matched_tokens.insert(token);
                }
            }
        }

        if(!highlight.values.empty()) {
            found_full_highlight = found_full_highlight || true;
        }
    }

    highlight.field = search_field.name;
    highlight.field_index = search_field_index;

    if(!match_indices.empty()) {
        // match_indices is partially sorted best-first, so [0] is the top score
        highlight.match_score = match_indices[0].match_score;
    }
}

// Tokenizes `text` and fills `highlight` with a snippet (and, when
// `highlight_fully` is set, a fully highlighted value) built from the matched
// token offsets. Returns false when no token could be highlighted.
bool Collection::handle_highlight_text(std::string& text, bool normalise, const field &search_field,
                                       const bool is_arr_obj_ele,
                                       const std::vector<char>& symbols_to_index,
                                       const std::vector<char>& token_separators,
                                       highlight_t& highlight, StringUtils & string_utils,
                                       bool use_word_tokenizer,
                                       const size_t highlight_affix_num_tokens,
                                       const tsl::htrie_map<char, token_leaf>& qtoken_leaves,
                                       int last_valid_offset_index,
                                       const size_t prefix_token_num_chars,
                                       bool highlight_fully,
                                       const size_t snippet_threshold,
                                       bool is_infix_search,
                                       std::vector<std::string>& raw_query_tokens,
                                       size_t last_valid_offset,
                                       const std::string& highlight_start_tag,
                                       const std::string& highlight_end_tag,
                                       const uint8_t* index_symbols,
                                       const match_index_t& match_index) const {

    const Match& match = match_index.match;

    Tokenizer tokenizer(text, normalise, false, search_field.locale, symbols_to_index,
                        token_separators, search_field.get_stemmer());

    // word tokenizer is a secondary tokenizer used for specific languages that requires transliteration
    Tokenizer word_tokenizer("", true, false, search_field.locale, symbols_to_index,
                             token_separators, search_field.get_stemmer());

    if(search_field.locale == "ko") {
        text = string_utils.unicode_nfkd(text);
    }

    // need an ordered map here to ensure that it is ordered by the key (start offset)
    std::map<size_t, size_t> token_offsets;

    int match_offset_index = 0;
    std::string raw_token;
    std::set<std::string> token_hits;  // used to identify repeating tokens

    size_t raw_token_index = 0, tok_start = 0, tok_end = 0;

    // based on `highlight_affix_num_tokens`
    size_t snippet_start_offset = 0, snippet_end_offset = (text.empty() ? 0 : text.size() - 1);

    // window used to locate the starting offset for snippet on the text
    std::list<size_t> snippet_start_window;

    highlight.matched_tokens.emplace_back();
    std::vector<std::string>& matched_tokens = highlight.matched_tokens.back();

    bool found_first_match = false;

    // NOTE(review): text[0] on an empty string is well-defined (returns '\0')
    // but treats empty text as ASCII — the empty case is handled above.
    size_t text_len = Tokenizer::is_ascii_char(text[0]) ? text.size() : StringUtils::get_num_chars(text);

    while(tokenizer.next(raw_token, raw_token_index, tok_start, tok_end)) {
        if(use_word_tokenizer) {
            bool found_token = word_tokenizer.tokenize(raw_token);
            if(!found_token) {
                tokenizer.decr_token_counter();
                continue;
            }
        }

        if(!found_first_match) {
            // maintain a sliding window of the last few token start offsets so
            // the snippet can begin `highlight_affix_num_tokens` before the match
            if(snippet_start_window.size() == highlight_affix_num_tokens + 1) {
                snippet_start_window.pop_front();
            }

            snippet_start_window.push_back(tok_start);
        }

        bool token_already_found = (token_hits.find(raw_token) != token_hits.end());
        auto qtoken_it = qtoken_leaves.find(raw_token);

        // ensures that the `snippet_start_offset` is always from a matched token, and not from query suggestion
        bool match_offset_found = (found_first_match && token_already_found) ||
                                  (match_offset_index <= last_valid_offset_index &&
                                   match.offsets[match_offset_index].offset == raw_token_index);

        if(match_offset_found && text_len/4 > 64000) {
            // handle wrap around of token offsets: we will have to verify value of token as well
            match_offset_found = (qtoken_it != qtoken_leaves.end());
        }

        // Token might not appear in the best matched window, which is limited to a size of 10.
        // If field is marked to be highlighted fully, or field length exceeds snippet_threshold, we will
        // locate all tokens that appear in the query / query candidates. Likewise, for text within nested array of
        // objects have to be exhaustively looked for highlight tokens.
        bool raw_token_found = !match_offset_found &&
                               (highlight_fully || is_arr_obj_ele || text_len < snippet_threshold * 6) &&
                               qtoken_leaves.find(raw_token) != qtoken_leaves.end();

        if (match_offset_found || raw_token_found) {
            if(qtoken_it != qtoken_leaves.end() && qtoken_it.value().is_prefix &&
               qtoken_it.value().root_len < raw_token.size()) {
                // need to ensure that only the prefix portion is highlighted
                // if length diff is within 2, we still might not want to highlight partially in some cases
                // e.g. "samsng" vs "samsung" -> full highlight is preferred, unless it's a full prefix match
                size_t k = tok_start;
                size_t num_letters = 0, prefix_letters = 0, prefix_end = tok_start;

                // group unicode code points and calculate number of actual characters
                while(k <= tok_end) {
                    k++;

                    if(tokenizer.should_skip_char(text[k])) {
                        // used to handle special characters inside a tokenized word, e.g. `foo-bar`
                        continue;
                    }

                    // skip UTF-8 continuation bytes (0b10xxxxxx) so multi-byte
                    // code points count as a single character
                    if ((text[k] & 0xC0) == 0x80) k++;
                    if ((text[k] & 0xC0) == 0x80) k++;
                    if ((text[k] & 0xC0) == 0x80) k++;

                    num_letters++;

                    if(num_letters <= prefix_token_num_chars) {
                        prefix_letters++;
                    }

                    if(num_letters == prefix_token_num_chars) {
                        prefix_end = k - 1;
                    }
                }

                if(num_letters < prefix_token_num_chars) {
                    // can happen in the case of stemming
                    prefix_end = tok_start + num_letters;
                }

                size_t char_diff = num_letters - prefix_letters;
                auto new_tok_end = (char_diff <= 2 && qtoken_it.value().num_typos != 0) ? tok_end : prefix_end;
                token_offsets.emplace(tok_start, new_tok_end);
            } else {
                token_offsets.emplace(tok_start, tok_end);
            }

            token_hits.insert(raw_token);

            if(match_offset_found) {
                // to skip over duplicate tokens in the query
                do {
                    match_offset_index++;
                } while(match_offset_index <= last_valid_offset_index &&
                        match.offsets[match_offset_index - 1].offset == match.offsets[match_offset_index].offset);

                if(!found_first_match) {
                    snippet_start_offset = snippet_start_window.front();
                }

                found_first_match = true;
            } else if(raw_token_found && is_arr_obj_ele) {
                if(!found_first_match) {
                    snippet_start_offset = snippet_start_window.front();
                }

                found_first_match = true;
            }
        } else if(is_infix_search && text.size() < 100 &&
                  raw_token.find(raw_query_tokens.front()) != std::string::npos) {
            // infix fallback: substring match inside a token of a short text
            token_offsets.emplace(tok_start, tok_end);
            token_hits.insert(raw_token);
        }

        if(last_valid_offset_index != -1 && raw_token_index >= last_valid_offset + highlight_affix_num_tokens) {
            // register end of highlight snippet
            if(snippet_end_offset == text.size() - 1) {
                snippet_end_offset = tok_end;
            }
        }

        // We can break early only if we have:
        // a) run out of matched indices
        // b) token_index exceeds the suffix tokens boundary
        // c) raw_token_index exceeds snippet threshold
        // d) highlight fully is not requested
        if(raw_token_index >= snippet_threshold &&
           match_offset_index > last_valid_offset_index &&
           raw_token_index >= last_valid_offset + highlight_affix_num_tokens &&
           !is_arr_obj_ele && !highlight_fully) {
            break;
        }
    }

    if(token_offsets.empty()) {
        return false;
    }

    if(raw_token_index <= snippet_threshold-1) {
        // fully highlight field whose token size is less than given snippet threshold
        snippet_start_offset = 0;
        snippet_end_offset = text.size() - 1;
    }

    // `token_offsets` has a list of ranges to target for highlighting
    // tokens from query might occur before actual snippet start offset: we skip that
    auto offset_it = token_offsets.begin();
    while(offset_it != token_offsets.end() && offset_it->first < snippet_start_offset) {
        offset_it++;
    }

    std::stringstream highlighted_text;
    highlight_text(highlight_start_tag, highlight_end_tag, text, token_offsets, snippet_end_offset,
                   matched_tokens, offset_it, highlighted_text, index_symbols, snippet_start_offset);
    highlight.snippets.push_back(highlighted_text.str());

    if(search_field.type == field_types::STRING_ARRAY) {
        highlight.indices.push_back(match_index.index);
    }

    if(highlight_fully) {
        // highlight the entire text again, from offset 0 to the end
        std::stringstream value_stream;
        offset_it = token_offsets.begin();
        std::vector<std::string> full_matched_tokens;
        highlight_text(highlight_start_tag, highlight_end_tag, text, token_offsets, text.size()-1,
                       full_matched_tokens, offset_it, value_stream, index_symbols, 0);
        highlight.values.push_back(value_stream.str());
    }

    return true;
}

// Streams `text` into `highlighted_text`, wrapping each token range from
// `token_offsets` (starting at `offset_it`) between the start/end tags, and
// collects each highlighted token into `matched_tokens`.
void Collection::highlight_text(const string& highlight_start_tag, const string& highlight_end_tag,
                                const string& text,
                                const std::map<size_t, size_t>& token_offsets,
                                size_t snippet_end_offset,
                                std::vector<std::string>& matched_tokens,
                                std::map<size_t, size_t>::iterator& offset_it,
                                std::stringstream& highlighted_text,
                                const uint8_t* index_symbols,
                                size_t snippet_start_offset) {

    while(snippet_start_offset <= snippet_end_offset) {
        if(offset_it != token_offsets.end()) {
            if (snippet_start_offset == offset_it->first) {
                highlighted_text << highlight_start_tag;

                auto end_offset = offset_it->second;

                // if a token ends with one or more puncutation chars, we should not highlight them
                for(int j = end_offset; j >= 0; j--) {
                    if(end_offset >= text.size()) {
                        // this should not happen unless we mess up unicode normalization
                        break;
                    }

                    if(!std::isalnum(text[j]) && Tokenizer::is_ascii_char(text[j]) &&
                       index_symbols[uint8_t(text[j])] != 1) {
                        end_offset--;
                    } else {
                        break;
                    }
                }

                size_t token_len = end_offset - snippet_start_offset + 1;
                const std::string& text_token = text.substr(snippet_start_offset, token_len);
                matched_tokens.push_back(text_token);

                for(size_t j = 0; j < token_len; j++) {
                    // defensive bounds check: log and bail instead of reading past the text
                    if((snippet_start_offset + j) >= text.size()) {
                        LOG(ERROR) << "??? snippet_start_offset: " << snippet_start_offset
                                   << ", offset_it->first: " << offset_it->first
                                   << ", offset_it->second: " << offset_it->second
                                   << ", end_offset: " << end_offset
                                   << ", j: " << j
                                   << ", token_len: " << token_len
                                   << ", text: " << text;
                        break;
                    }
                    highlighted_text << text[snippet_start_offset + j];
                }

                highlighted_text << highlight_end_tag;
                offset_it++;
                snippet_start_offset += token_len;
                continue;
            }
        }

        highlighted_text << text[snippet_start_offset];
        snippet_start_offset++;
    }
}

// Fetches the document with the given user-facing `id` from the on-disk
// store. Returns 404 when the id (or its stored document) is unknown and 500
// on store/parse failures.
Option<nlohmann::json> Collection::get(const std::string & id) const {
    std::string seq_id_str;
    StoreStatus seq_id_status = store->get(get_doc_id_key(id), seq_id_str);

    if(seq_id_status == StoreStatus::NOT_FOUND) {
        return Option<nlohmann::json>(404, "Could not find a document with id: " + id);
    }

    if(seq_id_status == StoreStatus::ERROR) {
        return Option<nlohmann::json>(500, "Error while fetching the document.");
    }

    uint32_t seq_id = (uint32_t) std::stoul(seq_id_str);

    std::string parsed_document;
    StoreStatus doc_status = store->get(get_seq_id_key(seq_id), parsed_document);

    if(doc_status == StoreStatus::NOT_FOUND) {
        // id → seq_id mapping exists but the document blob is gone
        LOG(ERROR) << "Sequence ID exists, but document is missing for id: " << id;
        return Option<nlohmann::json>(404, "Could not find a document with id: " + id);
    }

    if(doc_status == StoreStatus::ERROR) {
        return Option<nlohmann::json>(500, "Error while fetching the document.");
    }

    nlohmann::json document;
    try {
        document = nlohmann::json::parse(parsed_document);
    } catch(...) {
        return Option<nlohmann::json>(500, "Error while parsing stored document.");
    }

    return Option<nlohmann::json>(document);
}

// Removes `document` (already looked up, identified by `seq_id`) from the
// in-memory index, cascading the removal to collections that reference this
// one, and optionally deletes its keys from the on-disk store.
void Collection::remove_document(nlohmann::json & document, const uint32_t seq_id, bool remove_from_store) {
    // copy under lock, then release it: cascade_remove_docs() below re-enters
    // collection methods and must not run while we hold the mutex
    spp::sparse_hash_map<std::string, std::string> referenced_in_copy;
    {
        std::unique_lock lock(mutex);
        referenced_in_copy = referenced_in;
    }

    // Cascade delete all the references.
    if (!referenced_in_copy.empty()) {
        CollectionManager& collectionManager = CollectionManager::get_instance();
        for (const auto &item: referenced_in_copy) {
            auto coll = collectionManager.get_collection(item.first);
            if (coll != nullptr) {
                coll->cascade_remove_docs(item.second, seq_id, document, remove_from_store);
            }
        }
    }

    {
        std::unique_lock lock(mutex);
        index->remove(seq_id, document, {}, false);
        if (num_documents != 0) {
            num_documents -= 1;
        }
    }

    if(remove_from_store) {
        const std::string& id = document["id"];
        store->remove(get_doc_id_key(id));
        store->remove(get_seq_id_key(seq_id));
    }
}

// Deletes (or patches) the documents of THIS collection that reference the
// document `ref_seq_id` of another collection via `field_name`. Singular
// reference fields delete/patch whole docs; array reference fields strip the
// single referencing value out of the arrays.
void Collection::cascade_remove_docs(const std::string& field_name, const uint32_t& ref_seq_id,
                                     const nlohmann::json& ref_doc, bool remove_from_store) {
    auto const ref_helper_field_name = field_name + fields::REFERENCE_HELPER_FIELD_SUFFIX;

    // find all local docs whose reference helper field points at `ref_seq_id`
    filter_result_t filter_result;
    get_filter_ids(ref_helper_field_name + ":" + std::to_string(ref_seq_id), filter_result, false);

    if (filter_result.count == 0) {
        return;
    }

    bool is_field_singular, is_field_optional;
    {
        std::unique_lock lock(mutex);
        auto it = search_schema.find(field_name);
        if (it == search_schema.end()) {
            return;
        }

        is_field_singular = it.value().is_singular();
        is_field_optional =
        it.value().optional;
    }

    // buffer of JSON update payloads to be applied in one batch at the end
    std::vector<std::string> buffer;
    buffer.reserve(filter_result.count);

    if (is_field_singular) {
        // Delete all the docs where reference helper field has value `seq_id`.
        for (uint32_t i = 0; i < filter_result.count; i++) {
            auto const& seq_id = filter_result.docs[i];

            nlohmann::json existing_document;
            auto get_doc_op = get_document_from_store(get_seq_id_key(seq_id), existing_document);

            if (!get_doc_op.ok()) {
                if (get_doc_op.code() == 404) {
                    LOG(ERROR) << "`" << name << "` collection: Sequence ID `" << seq_id
                               << "` exists, but document is missing.";
                    continue;
                }

                LOG(ERROR) << "`" << name << "` collection: " << get_doc_op.error();
                continue;
            }

            bool multiple_ref_fields = existing_document.contains(fields::reference_helper_fields) &&
                                       existing_document[fields::reference_helper_fields].size() > 1;

            // If there are other references present and the reference of an optional field is removed,
            // don't delete the document.
            if (multiple_ref_fields && is_field_optional) {
                auto const id = existing_document["id"].get<std::string>();
                nlohmann::json update_document;
                update_document["id"] = id;
                update_document[field_name] = nullptr;  // null out just the reference field

                buffer.push_back(update_document.dump());
            } else {
                remove_document(existing_document, seq_id, remove_from_store);
            }
        }
    } else {
        std::string ref_coll_name, ref_field_name;
        {
            std::unique_lock lock(mutex);
            auto ref_it = reference_fields.find(field_name);
            if (ref_it == reference_fields.end()) {
                return;
            }

            ref_coll_name = ref_it->second.collection;
            ref_field_name = ref_it->second.field;
        }

        if (ref_doc.count(ref_field_name) == 0) {
            LOG(ERROR) << "`" << ref_coll_name << "` collection doc `" << ref_doc.dump()
                       << "` is missing `" << ref_field_name << "` field.";
            return;
        } else if (ref_doc.at(ref_field_name).is_array()) {
            LOG(ERROR) << "`" << ref_coll_name << "` collection doc `" << ref_doc.dump()
                       << "` field `" << ref_field_name << "` is an array.";
            return;
        }

        // Delete all references to `seq_id` in the docs.
        for (uint32_t i = 0; i < filter_result.count; i++) {
            auto const& seq_id = filter_result.docs[i];

            nlohmann::json existing_document;
            auto get_doc_op = get_document_from_store(get_seq_id_key(seq_id), existing_document);

            if (!get_doc_op.ok()) {
                if (get_doc_op.code() == 404) {
                    LOG(ERROR) << "`" << name << "` collection: Sequence ID `" << seq_id
                               << "` exists, but document is missing.";
                    continue;
                }

                LOG(ERROR) << "`" << name << "` collection: " << get_doc_op.error();
                continue;
            }

            // sanity-check the stored doc before touching the reference arrays;
            // note: these diagnostic branches fall through to the delete path below
            if (existing_document.count("id") == 0) {
                LOG(ERROR) << "`" << name << "` collection doc `" << existing_document.dump()
                           << "` is missing `id` field.";
            } else if (existing_document.count(field_name) == 0) {
                LOG(ERROR) << "`" << name << "` collection doc `" << existing_document.dump()
                           << "` is missing `" << field_name << "` field.";
            } else if (!existing_document.at(field_name).is_array()) {
                LOG(ERROR) << "`" << name << "` collection doc `" << existing_document.dump()
                           << "` field `" << field_name << "` is not an array.";
            } else if (existing_document.at(field_name).empty()) {
                LOG(ERROR) << "`" << name << "` collection doc `" << existing_document.dump()
                           << "` field `" << field_name << "` is empty.";
            } else if (existing_document.at(field_name)[0].type() != ref_doc.at(ref_field_name).type()) {
                LOG(ERROR) << "`" << name << "` collection doc `" << existing_document.dump()
                           << "` at field `" << field_name << "` elements do not match the type of `"
                           << ref_coll_name << "` collection doc `"<< ref_doc.dump()
                           << "` at field `" << ref_field_name << "`.";
            } else if (existing_document.count(ref_helper_field_name) == 0) {
                LOG(ERROR) << "`" << name << "` collection doc `" << existing_document.dump()
                           << "` is missing `" << ref_helper_field_name << "` field.";
            } else if (!existing_document.at(ref_helper_field_name).is_array()) {
                LOG(ERROR) << "`" << name << "` collection doc `" << existing_document.dump()
                           << "` field `" << ref_helper_field_name << "` is not an array.";
            } else if (existing_document[field_name].size() != existing_document[ref_helper_field_name].size()) {
                LOG(ERROR) << "`" << name << "` collection doc `" << existing_document.dump()
                           << "` reference field `" << field_name
                           << "` values and its reference helper field `" << ref_helper_field_name
                           << "` values differ in count.";
            }
            // If there are more than one references present in this document, we cannot delete the whole doc.
            // Only remove `ref_seq_id` from reference helper field.
            else if (existing_document.at(field_name).size() > 1) {
                nlohmann::json update_document;
                update_document["id"] = existing_document["id"].get<std::string>();
                update_document[field_name] = nlohmann::json::array();

                auto removed_ref_value_found = false;
                // We assume here that the value in reference field at a particular index corresponds to the value
                // present at the same index in the reference helper field.
                for (uint32_t j = 0; j < existing_document[field_name].size(); j++) {
                    auto const& ref_value = existing_document[field_name][j];
                    if (ref_value == ref_doc.at(ref_field_name)) {
                        removed_ref_value_found = true;
                        continue;  // drop this entry and its helper counterpart
                    }

                    update_document[field_name] += ref_value;
                    update_document[ref_helper_field_name] += existing_document[ref_helper_field_name][j];
                }

                if (removed_ref_value_found) {
                    buffer.push_back(update_document.dump());
                }

                continue;
            }

            bool multiple_ref_fields = existing_document.contains(fields::reference_helper_fields) &&
                                       existing_document[fields::reference_helper_fields].size() > 1;

            // If there are other references present and the reference of an optional field is removed,
            // don't delete the document.
            if (multiple_ref_fields && is_field_optional) {
                auto const id = existing_document["id"].get<std::string>();
                nlohmann::json update_document;
                update_document["id"] = id;
                update_document[field_name] = nullptr;

                buffer.push_back(update_document.dump());
            } else {
                remove_document(existing_document, seq_id, remove_from_store);
            }
        }
    }

    // apply all buffered partial updates in one shot
    nlohmann::json dummy;
    add_many(buffer, dummy, index_operation_t::UPDATE);
}

// Removes the document with the given user-facing `id`. Returns the id on
// success, 404 when the id is unknown, 500 on store errors.
Option<std::string> Collection::remove(const std::string & id, const bool remove_from_store) {
    std::string seq_id_str;
    StoreStatus seq_id_status = store->get(get_doc_id_key(id), seq_id_str);

    if(seq_id_status == StoreStatus::NOT_FOUND) {
        return Option<std::string>(404, "Could not find a document with id: " + id);
    }

    if(seq_id_status == StoreStatus::ERROR) {
        return Option<std::string>(500, "Error while fetching the document.");
    }

    uint32_t seq_id = (uint32_t) std::stoul(seq_id_str);

    nlohmann::json document;
    auto get_doc_op = get_document_from_store(get_seq_id_key(seq_id), document);

    if(!get_doc_op.ok()) {
        if(get_doc_op.code() == 404) {
            LOG(ERROR) << "Sequence ID exists, but document is missing for id: " << id;
            return Option<std::string>(404, "Could not find a document with id: " + id);
        }

        return Option<std::string>(get_doc_op.code(), get_doc_op.error());
    }

    remove_document(document, seq_id, remove_from_store);
    return Option<std::string>(id);
}

// Removes the document with the given `seq_id` if it exists. The returned
// Option carries whether a document was actually removed; a missing doc is
// not an error (returns false), store failures return 500.
Option<bool> Collection::remove_if_found(uint32_t seq_id, const bool remove_from_store) {
    nlohmann::json document;
    auto get_doc_op = get_document_from_store(get_seq_id_key(seq_id), document);

    if(!get_doc_op.ok()) {
        if(get_doc_op.code() == 404) {
            return Option<bool>(false);
        }

        return Option<bool>(500, "Error while fetching the document with seq id: " + std::to_string(seq_id));
    }

    remove_document(document, seq_id, remove_from_store);
    return Option<bool>(true);
}

// Adds (or replaces) a curation override, persisting it to disk when
// `write_to_store` is set, and refreshes the tag → override-id mapping.
Option<uint32_t> Collection::add_override(const override_t & override, bool write_to_store) {
    if(write_to_store) {
        bool inserted = store->insert(Collection::get_override_key(name,
override.id), override.to_json().dump()); if(!inserted) { return Option<uint32_t>(500, "Error while storing the override on disk."); } } std::unique_lock lock(mutex); if(overrides.count(override.id) != 0 && !overrides[override.id].rule.tags.empty()) { // remove existing tags for(auto& tag: overrides[override.id].rule.tags) { if(override_tags.count(tag) != 0) { override_tags[tag].erase(override.id); } } } overrides[override.id] = override; for(const auto& tag: override.rule.tags) { override_tags[tag].insert(override.id); } return Option<uint32_t>(200); } Option<uint32_t> Collection::remove_override(const std::string & id) { if(overrides.count(id) != 0) { bool removed = store->remove(Collection::get_override_key(name, id)); if(!removed) { return Option<uint32_t>(500, "Error while deleting the override from disk."); } std::unique_lock lock(mutex); for(const auto& tag: overrides[id].rule.tags) { if(override_tags.count(tag) != 0) { override_tags[tag].erase(id); } } overrides.erase(id); return Option<uint32_t>(200); } return Option<uint32_t>(404, "Could not find that `id`."); } uint32_t Collection::get_seq_id_from_key(const std::string & key) { // last 4 bytes of the key would be the serialized version of the sequence id std::string serialized_seq_id = key.substr(key.length() - 4); return StringUtils::deserialize_uint32_t(serialized_seq_id); } std::string Collection::get_next_seq_id_key(const std::string & collection_name) { return std::string(COLLECTION_NEXT_SEQ_PREFIX) + "_" + collection_name; } std::string Collection::get_seq_id_key(uint32_t seq_id) const { // We can't simply do std::to_string() because we want to preserve the byte order. // & 0xFF masks all but the lowest eight bits. 
    const std::string & serialized_id = StringUtils::serialize_uint32_t(seq_id);
    return get_seq_id_collection_prefix() + "_" + serialized_id;
}

// Store key that maps a user-visible doc id to its sequence id.
std::string Collection::get_doc_id_key(const std::string & doc_id) const {
    return std::to_string(collection_id) + "_" + DOC_ID_PREFIX + "_" + doc_id;
}

std::string Collection::get_name() const {
    std::shared_lock lock(mutex);
    return name;
}

uint64_t Collection::get_created_at() const {
    return created_at.load();
}

size_t Collection::get_num_documents() const {
    return num_documents.load();
}

uint32_t Collection::get_collection_id() const {
    return collection_id.load();
}

Option<uint32_t> Collection::doc_id_to_seq_id_with_lock(const std::string & doc_id) const {
    std::shared_lock lock(mutex);
    return doc_id_to_seq_id(doc_id);
}

// Looks up the sequence id for a doc id directly from the store.
// 404 when absent, 500 on store errors.
Option<uint32_t> Collection::doc_id_to_seq_id(const std::string & doc_id) const {
    std::string seq_id_str;
    StoreStatus status = store->get(get_doc_id_key(doc_id), seq_id_str);
    if(status == StoreStatus::FOUND) {
        uint32_t seq_id = std::stoul(seq_id_str);
        return Option<uint32_t>(seq_id);
    }

    if(status == StoreStatus::NOT_FOUND) {
        return Option<uint32_t>(404, "Not found.");
    }

    return Option<uint32_t>(500, "Error while fetching doc_id from store.");
}

// Names of all fields marked as facetable.
std::vector<std::string> Collection::get_facet_fields() {
    std::shared_lock lock(mutex);

    std::vector<std::string> facet_fields_copy;
    for(auto it = search_schema.begin(); it != search_schema.end(); ++it) {
        if(it.value().facet) {
            facet_fields_copy.push_back(it.key());
        }
    }

    return facet_fields_copy;
}

// Copies of all fields marked as sortable.
std::vector<field> Collection::get_sort_fields() {
    std::shared_lock lock(mutex);

    std::vector<field> sort_fields_copy;
    for(auto it = search_schema.begin(); it != search_schema.end(); ++it) {
        if(it.value().sort) {
            sort_fields_copy.push_back(it.value());
        }
    }

    return sort_fields_copy;
}

std::vector<field> Collection::get_fields() {
    std::shared_lock lock(mutex);
    return fields;
}

bool Collection::contains_field(const std::string &field) {
    std::shared_lock lock(mutex);
    return
search_schema.find(field) != search_schema.end(); } std::unordered_map<std::string, field> Collection::get_dynamic_fields() { std::shared_lock lock(mutex); return dynamic_fields; } tsl::htrie_map<char, field> Collection::get_schema() { std::shared_lock lock(mutex); return search_schema; }; tsl::htrie_map<char, field> Collection::get_nested_fields() { std::shared_lock lock(mutex); return nested_fields; }; tsl::htrie_map<char, field> Collection::get_embedding_fields() { std::shared_lock lock(mutex); return embedding_fields; }; tsl::htrie_set<char> Collection::get_object_reference_helper_fields() { std::shared_lock lock(mutex); return object_reference_helper_fields; } std::string Collection::get_meta_key(const std::string & collection_name) { return std::string(COLLECTION_META_PREFIX) + "_" + collection_name; } std::string Collection::get_override_key(const std::string & collection_name, const std::string & override_id) { return std::string(COLLECTION_OVERRIDE_PREFIX) + "_" + collection_name + "_" + override_id; } std::string Collection::get_seq_id_collection_prefix() const { return std::to_string(collection_id) + "_" + std::string(SEQ_ID_PREFIX); } std::string Collection::get_default_sorting_field() { std::shared_lock lock(mutex); return default_sorting_field; } void Collection::update_metadata(const nlohmann::json& meta) { std::shared_lock lock(mutex); metadata = meta; } Option<bool> Collection::update_apikey(const nlohmann::json& model_config, const std::string& field_name) { std::unique_lock ulock(mutex); const auto& model_name = model_config[fields::model_name]; const auto& api_key = model_config[fields::api_key]; for(auto& coll_field : fields) { if (coll_field.name == field_name) { auto &coll_model_config = coll_field.embed[fields::model_config]; if (!coll_model_config.contains(fields::model_name) || coll_model_config[fields::model_name] != model_name) { return Option<bool>(400, "`model_name` mismatch for api_key updation."); } if 
            (!coll_model_config.contains(fields::api_key)) {
                return Option<bool>(400, "Invalid model for api_key updation.");
            }

            if (coll_model_config[fields::api_key] == api_key) {
                return Option<bool>(400, "trying to update with same api_key.");
            }

            // update the api_key in the remote embedder first, then in the collection
            auto update_op = EmbedderManager::get_instance().update_remote_model_apikey(coll_model_config, api_key);
            if (!update_op.ok()) {
                return update_op;
            }

            coll_model_config[fields::api_key] = api_key;
            embedding_fields[field_name].embed[fields::model_config][fields::api_key] = api_key;

            // persist so that the new key survives a restart
            auto persist_op = persist_collection_meta();
            if (!persist_op.ok()) {
                return persist_op;
            }
        }
    }

    return Option<bool>(true);
}

// Convenience overload: fetches a stored document by raw sequence id.
Option<bool> Collection::get_document_from_store(const uint32_t& seq_id, nlohmann::json& document, bool raw_doc) const {
    return get_document_from_store(get_seq_id_key(seq_id), document, raw_doc);
}

// Fetches and parses the JSON document stored under `seq_id_key`.
// When `raw_doc` is false and nested fields are enabled, flattened helper
// fields are (re)computed on the parsed document before returning.
Option<bool> Collection::get_document_from_store(const std::string &seq_id_key, nlohmann::json& document, bool raw_doc) const {
    std::string json_doc_str;
    StoreStatus json_doc_status = store->get(seq_id_key, json_doc_str);

    if(json_doc_status != StoreStatus::FOUND) {
        const std::string& seq_id = std::to_string(get_seq_id_from_key(seq_id_key));
        if(json_doc_status == StoreStatus::NOT_FOUND) {
            return Option<bool>(404, "Could not locate the JSON document for sequence ID: " + seq_id);
        }

        return Option<bool>(500, "Error while fetching JSON document for sequence ID: " + seq_id);
    }

    try {
        document = nlohmann::json::parse(json_doc_str);
    } catch(...)
    {
        return Option<bool>(500, "Error while parsing stored document with sequence ID: " + seq_id_key);
    }

    if(!raw_doc && enable_nested_fields) {
        std::vector<field> flattened_fields;
        field::flatten_doc(document, nested_fields, {}, true, flattened_fields);
    }

    return Option<bool>(true);
}

const Index* Collection::_get_index() const {
    return index;
}

// Parses the `pinned_hits` parameter of the form "id1:pos1,id2:pos2,..." into
// a position -> list-of-ids map. Positions are 1-based.
Option<bool> Collection::parse_pinned_hits(const std::string& pinned_hits_str,
                                           std::map<size_t, std::vector<std::string>>& pinned_hits) {
    if(!pinned_hits_str.empty()) {
        std::vector<std::string> pinned_hits_strs;
        StringUtils::split(pinned_hits_str, pinned_hits_strs, ",");

        for(const std::string & pinned_hits_part: pinned_hits_strs) {
            std::vector<std::string> expression_parts;
            // scan backwards for the last ':' so that ids may themselves contain ':'
            int64_t index = pinned_hits_part.size() - 1;
            while(index >= 0 && pinned_hits_part[index] != ':') {
                index--;
            }

            if(index == 0) {
                return Option<bool>(400, "Pinned hits are not in expected format.");
            }

            std::string pinned_id = pinned_hits_part.substr(0, index);
            std::string pinned_pos = pinned_hits_part.substr(index+1);

            if(!StringUtils::is_positive_integer(pinned_pos)) {
                return Option<bool>(400, "Pinned hits are not in expected format.");
            }

            int position = std::stoi(pinned_pos);
            if(position == 0) {
                return Option<bool>(400, "Pinned hits must start from position 1.");
            }

            pinned_hits[position].emplace_back(pinned_id);
        }
    }

    return Option<bool>(true);
}

// Parses the drop-tokens mode, which is either a bare mode name or
// "<mode>:<token_limit>".
Option<drop_tokens_param_t> Collection::parse_drop_tokens_mode(const std::string& drop_tokens_mode) {
    drop_tokens_mode_t drop_tokens_mode_val = left_to_right;
    size_t drop_tokens_token_limit = 1000;
    auto drop_tokens_mode_op = magic_enum::enum_cast<drop_tokens_mode_t>(drop_tokens_mode);

    if(drop_tokens_mode_op.has_value()) {
        drop_tokens_mode_val = drop_tokens_mode_op.value();
    } else {
        std::vector<std::string> drop_token_parts;
        StringUtils::split(drop_tokens_mode, drop_token_parts, ":");
        if(drop_token_parts.size() == 2) {
            if(!StringUtils::is_uint32_t(drop_token_parts[1])) {
                return Option<drop_tokens_param_t>(400, "Invalid format for drop tokens mode.");
            }

            drop_tokens_mode_op = magic_enum::enum_cast<drop_tokens_mode_t>(drop_token_parts[0]);
            if(drop_tokens_mode_op.has_value()) {
                drop_tokens_mode_val = drop_tokens_mode_op.value();
            }

            drop_tokens_token_limit = std::stoul(drop_token_parts[1]);
        } else {
            return Option<drop_tokens_param_t>(400, "Invalid format for drop tokens mode.");
        }
    }

    return Option<drop_tokens_param_t>(drop_tokens_param_t(drop_tokens_mode_val, drop_tokens_token_limit));
}

// Parses and registers a synonym; storage is delegated to the synonym index.
Option<bool> Collection::add_synonym(const nlohmann::json& syn_json, bool write_to_store) {
    std::shared_lock lock(mutex);

    synonym_t synonym;
    Option<bool> syn_op = synonym_t::parse(syn_json, synonym);
    if(!syn_op.ok()) {
        return syn_op;
    }

    return synonym_index->add_synonym(name, synonym, write_to_store);
}

bool Collection::get_synonym(const std::string& id, synonym_t& synonym) {
    std::shared_lock lock(mutex);
    return synonym_index->get_synonym(id, synonym);
}

Option<bool> Collection::remove_synonym(const std::string &id) {
    std::shared_lock lock(mutex);
    return synonym_index->remove_synonym(name, id);
}

void Collection::synonym_reduction(const std::vector<std::string>& tokens, const std::string& locale,
                                   std::vector<std::vector<std::string>>& results,
                                   bool synonym_prefix, uint32_t synonym_num_typos) const {
    std::shared_lock lock(mutex);
    return synonym_index->synonym_reduction(tokens, locale, results, synonym_prefix, synonym_num_typos);
}

Option<override_t> Collection::get_override(const std::string& override_id) {
    std::shared_lock lock(mutex);

    if(overrides.count(override_id) == 0) {
        return Option<override_t>(404, "override " + override_id + " not found.");
    }

    return Option<override_t>(overrides.at(override_id));
}

// Paginated listing of overrides; returns pointers into the internal map.
Option<std::map<std::string, override_t*>> Collection::get_overrides(uint32_t limit, uint32_t offset) {
    std::shared_lock lock(mutex);
    std::map<std::string, override_t*> overrides_map;
    auto overrides_it = overrides.begin();

    if(offset > 0) {
        if(offset >= overrides.size()) {
            return
Option<std::map<std::string, override_t*>>(400, "Invalid offset param."); } std::advance(overrides_it, offset); } auto overrides_end = overrides.end(); if(limit > 0 && (offset + limit < overrides.size())) { overrides_end = overrides_it; std::advance(overrides_end, limit); } for (overrides_it; overrides_it != overrides_end; ++overrides_it) { overrides_map[overrides_it->first] = &overrides_it->second; } return Option<std::map<std::string, override_t*>>(overrides_map); } Option<std::map<uint32_t, synonym_t*>> Collection::get_synonyms(uint32_t limit, uint32_t offset) { std::shared_lock lock(mutex); auto synonyms_op = synonym_index->get_synonyms(limit, offset); if(!synonyms_op.ok()) { return Option<std::map<uint32_t, synonym_t*>>(synonyms_op.code(), synonyms_op.error()); } return synonyms_op; } SynonymIndex* Collection::get_synonym_index() { return synonym_index; } spp::sparse_hash_map<std::string, reference_info_t> Collection::get_reference_fields() { std::shared_lock lock(mutex); return reference_fields; } spp::sparse_hash_map<std::string, std::set<reference_pair_t>> Collection::get_async_referenced_ins() { std::shared_lock lock(mutex); return async_referenced_ins; }; Option<bool> Collection::persist_collection_meta() { std::string coll_meta_json; StoreStatus status = store->get(Collection::get_meta_key(name), coll_meta_json); if(status != StoreStatus::FOUND) { return Option<bool>(500, "Could not fetch collection meta from store."); } nlohmann::json collection_meta; try { collection_meta = nlohmann::json::parse(coll_meta_json); } catch(...) 
    {
        return Option<bool>(500, "Unable to parse collection meta.");
    }

    nlohmann::json fields_json = nlohmann::json::array();
    Option<bool> fields_json_op = field::fields_to_json_fields(fields, default_sorting_field, fields_json);

    if(!fields_json_op.ok()) {
        return Option<bool>(fields_json_op.code(), fields_json_op.error());
    }

    collection_meta[COLLECTION_SEARCH_FIELDS_KEY] = fields_json;
    collection_meta[Collection::COLLECTION_DEFAULT_SORTING_FIELD_KEY] = default_sorting_field;
    collection_meta[Collection::COLLECTION_FALLBACK_FIELD_TYPE] = fallback_field_type;

    bool persisted = store->insert(Collection::get_meta_key(name), collection_meta.dump());
    if(!persisted) {
        return Option<bool>(500, "Could not persist collection meta to store.");
    }

    return Option<bool>(true);
}

// Applies additive schema changes (`alter_fields`) and re-indexes all stored
// documents against the updated schema; `del_fields` are removed from the
// index per batch and from the schema maps afterwards.
Option<bool> Collection::batch_alter_data(const std::vector<field>& alter_fields,
                                          const std::vector<field>& del_fields,
                                          const std::string& this_fallback_field_type) {
    // Update schema with additions (deletions can only be made later)
    std::vector<field> new_fields;
    tsl::htrie_map<char, field> schema_additions;
    bool found_embedding_field = false;

    std::unique_lock ulock(mutex);

    for(auto& f: alter_fields) {
        if(f.name == ".*") {
            fields.push_back(f);
            continue;
        }

        if(f.is_dynamic()) {
            dynamic_fields.emplace(f.name, f);
        } else {
            schema_additions.emplace(f.name, f);
            search_schema.emplace(f.name, f);
            new_fields.push_back(f);
        }

        if(f.nested) {
            check_and_add_nested_field(nested_fields, f);
        }

        if(f.embed.count(fields::from) != 0) {
            found_embedding_field = true;
            // initialize the embedding model if it is not loaded yet
            const auto& text_embedders = EmbedderManager::get_instance()._get_text_embedders();
            const auto& model_name = f.embed[fields::model_config][fields::model_name].get<std::string>();
            if(text_embedders.count(model_name) == 0) {
                size_t dummy_num_dim = 0;
                auto validate_model_res = EmbedderManager::get_instance().validate_and_init_model(f.embed[fields::model_config], dummy_num_dim);
                if(!validate_model_res.ok()) {
                    return Option<bool>(validate_model_res.code(), validate_model_res.error());
                }
            }
            embedding_fields.emplace(f.name, f);
        }

        fields.push_back(f);
    }

    // downgrade to a shared lock for the long re-indexing scan below
    ulock.unlock();
    std::shared_lock shlock(mutex);

    index->refresh_schemas(new_fields, {});

    // Now, we can index existing data onto the updated schema
    const std::string seq_id_prefix = get_seq_id_collection_prefix();
    std::string upper_bound_key = get_seq_id_collection_prefix() + "`";  // cannot inline this
    rocksdb::Slice upper_bound(upper_bound_key);
    rocksdb::Iterator* iter = store->scan(seq_id_prefix, &upper_bound);
    std::unique_ptr<rocksdb::Iterator> iter_guard(iter);

    size_t num_found_docs = 0;
    std::vector<index_record> iter_batch;
    const size_t index_batch_size = 1000;

    auto begin = std::chrono::high_resolution_clock::now();

    while(iter->Valid() && iter->key().starts_with(seq_id_prefix)) {
        num_found_docs++;
        const uint32_t seq_id = Collection::get_seq_id_from_key(iter->key().ToString());

        nlohmann::json document;
        try {
            document = nlohmann::json::parse(iter->value().ToString());
        } catch(const std::exception& e) {
            return Option<bool>(400, "Bad JSON in document: " + document.dump(-1, ' ', false, nlohmann::detail::error_handler_t::ignore));
        }

        if(enable_nested_fields) {
            std::vector<field> flattened_fields;
            field::flatten_doc(document, nested_fields, {}, true, flattened_fields);
        }

        index_record record(num_found_docs, seq_id, document, index_operation_t::CREATE, DIRTY_VALUES::COERCE_OR_DROP);
        iter_batch.emplace_back(std::move(record));

        // Peek and check for last record right here so that we handle batched indexing correctly
        // Without doing this, the "last batch" would have to be indexed outside the loop.
iter->Next(); bool last_record = !(iter->Valid() && iter->key().starts_with(seq_id_prefix)); if(num_found_docs % index_batch_size == 0 || last_record) { // put delete first because a field could be deleted and added in the same change set if(!del_fields.empty()) { for(auto& rec: iter_batch) { index->remove(seq_id, rec.doc, del_fields, true); } } Index::batch_memory_index(index, iter_batch, default_sorting_field, search_schema, embedding_fields, fallback_field_type, token_separators, symbols_to_index, true, 200, 60000, 2, found_embedding_field, true, schema_additions); if(found_embedding_field) { for(auto& index_record : iter_batch) { if(index_record.indexed.ok()) { remove_flat_fields(index_record.doc); const std::string& serialized_json = index_record.doc.dump(-1, ' ', false, nlohmann::detail::error_handler_t::ignore); bool write_ok = store->insert(get_seq_id_key(index_record.seq_id), serialized_json); if(!write_ok) { LOG(ERROR) << "Inserting doc with new embedding field failed for seq id: " << index_record.seq_id; index_record.index_failure(500, "Could not write to on-disk storage."); } else { index_record.index_success(); } } } } iter_batch.clear(); } if(num_found_docs % ((1 << 14)) == 0) { // having a cheaper higher layer check to prevent checking clock too often auto time_elapsed = std::chrono::duration_cast<std::chrono::seconds>( std::chrono::high_resolution_clock::now() - begin).count(); if(time_elapsed > 30) { begin = std::chrono::high_resolution_clock::now(); LOG(INFO) << "Altered " << num_found_docs << " so far."; } } } LOG(INFO) << "Finished altering " << num_found_docs << " document(s)."; shlock.unlock(); ulock.lock(); std::vector<field> garbage_embedding_fields_vec; for(auto& del_field: del_fields) { search_schema.erase(del_field.name); auto new_end = std::remove_if(fields.begin(), fields.end(), [&del_field](const field& f) { return f.name == del_field.name; }); fields.erase(new_end, fields.end()); if(del_field.is_dynamic()) { 
            dynamic_fields.erase(del_field.name);
        }

        if(del_field.nested) {
            nested_fields.erase(del_field.name);
        }

        if(del_field.embed.count(fields::from) != 0) {
            remove_embedding_field(del_field.name);
        }

        if(del_field.name == ".*") {
            fallback_field_type = "";
        }

        if(del_field.name == default_sorting_field) {
            default_sorting_field = "";
        }

        process_remove_field_for_embedding_fields(del_field, garbage_embedding_fields_vec);
    }

    ulock.unlock();
    shlock.lock();

    index->refresh_schemas({}, del_fields);
    index->refresh_schemas({}, garbage_embedding_fields_vec);

    auto persist_op = persist_collection_meta();
    if(!persist_op.ok()) {
        return persist_op;
    }

    return Option<bool>(true);
}

// Entry point for schema alteration: validates the payload against all stored
// documents, then applies deletions/additions and re-indexes modified fields.
Option<bool> Collection::alter(nlohmann::json& alter_payload) {
    std::shared_lock shlock(mutex);

    LOG(INFO) << "Collection " << name << " is being prepared for alter...";

    // Validate that all stored documents are compatible with the proposed schema changes.
    std::vector<field> del_fields;
    std::vector<field> addition_fields;
    std::vector<field> reindex_fields;
    std::vector<field> update_fields;

    std::string this_fallback_field_type;
    auto validate_op = validate_alter_payload(alter_payload, addition_fields, reindex_fields,
                                              del_fields, update_fields, this_fallback_field_type);
    if(!validate_op.ok()) {
        LOG(INFO) << "Alter failed validation: " << validate_op.error();
        return validate_op;
    }

    if(!this_fallback_field_type.empty() && !fallback_field_type.empty()) {
        LOG(INFO) << "Alter failed: schema already contains a `.*` field.";
        return Option<bool>(400, "The schema already contains a `.*` field.");
    }

    shlock.unlock();

    if(!this_fallback_field_type.empty() && fallback_field_type.empty()) {
        std::unique_lock ulock(mutex);
        fallback_field_type = this_fallback_field_type;
    }

    LOG(INFO) << "Alter payload validation is successful...";
    if(!reindex_fields.empty()) {
        LOG(INFO) << "Processing field additions and deletions first...";
    }

    auto batch_alter_op = batch_alter_data(addition_fields, del_fields, fallback_field_type);
    if(!batch_alter_op.ok()) {
        LOG(INFO) << "Alter failed during alter data: " << batch_alter_op.error();
        return batch_alter_op;
    }

    if(!reindex_fields.empty()) {
        LOG(INFO) << "Processing field modifications now...";
        batch_alter_op = batch_alter_data(reindex_fields, {}, fallback_field_type);
        if(!batch_alter_op.ok()) {
            LOG(INFO) << "Alter failed during alter data: " << batch_alter_op.error();
            return batch_alter_op;
        }
    }

    if(!update_fields.empty()) {
        for(const auto& f : update_fields) {
            if(f.embed.count(fields::from) != 0) {
                // it's an embed field
                auto op = update_apikey(f.embed[fields::model_config], f.name);
                if(!op.ok()) {
                    return op;
                }
            }
        }
    }

    // hide credentials in the alter payload return
    for(auto& field_json : alter_payload["fields"]) {
        if(field_json[fields::embed].count(fields::model_config) != 0) {
            hide_credential(field_json[fields::embed][fields::model_config], "api_key");
            hide_credential(field_json[fields::embed][fields::model_config], "access_token");
            hide_credential(field_json[fields::embed][fields::model_config], "refresh_token");
            hide_credential(field_json[fields::embed][fields::model_config], "client_id");
            hide_credential(field_json[fields::embed][fields::model_config], "client_secret");
            hide_credential(field_json[fields::embed][fields::model_config], "project_id");
        }
    }

    return Option<bool>(true);
}

// Strips the `.flat` bookkeeping key and every flattened helper field it lists.
void Collection::remove_flat_fields(nlohmann::json& document) {
    if(document.count(".flat") != 0) {
        for(const auto& flat_key: document[".flat"].get<std::vector<std::string>>()) {
            document.erase(flat_key);
        }
        document.erase(".flat");
    }
}

// Strips reference helper fields and their bookkeeping list from the document.
void Collection::remove_reference_helper_fields(nlohmann::json& document) {
    if(document.count(fields::reference_helper_fields) != 0) {
        for(const auto& key: document[fields::reference_helper_fields].get<std::vector<std::string>>()) {
            document.erase(key);
        }
        document.erase(fields::reference_helper_fields);
    }
}

// Lock-taking wrapper around prune_doc().
Option<bool> Collection::prune_doc_with_lock(nlohmann::json& doc, const tsl::htrie_set<char>& include_names,
                                             const tsl::htrie_set<char>& exclude_names, const
                                             std::map<std::string, reference_filter_result_t>& reference_filter_results,
                                             const uint32_t& seq_id,
                                             const std::vector<ref_include_exclude_fields>& ref_include_exclude_fields_vec) {
    std::shared_lock lock(mutex);
    return prune_doc(doc, include_names, exclude_names, "", 0, reference_filter_results,
                     this, seq_id, ref_include_exclude_fields_vec);
}

// Prunes `doc` down to `include_names` minus `exclude_names` (both support
// nested "a.b" paths via prefix lookups), recursing into objects and arrays of
// objects, and finally resolves reference includes via Join::include_references.
Option<bool> Collection::prune_doc(nlohmann::json& doc,
                                   const tsl::htrie_set<char>& include_names,
                                   const tsl::htrie_set<char>& exclude_names,
                                   const std::string& parent_name, size_t depth,
                                   const std::map<std::string, reference_filter_result_t>& reference_filter_results,
                                   Collection *const collection, const uint32_t& seq_id,
                                   const std::vector<ref_include_exclude_fields>& ref_include_exclude_fields_vec) {
    nlohmann::json original_doc;
    // keep a pristine copy only when reference includes will need it later
    if (!ref_include_exclude_fields_vec.empty()) {
        original_doc = doc;
    }

    // doc can only be an object
    auto it = doc.begin();
    while(it != doc.end()) {
        std::string nested_name = parent_name + (parent_name.empty() ? it.key() : "."
                                                                                  + it.key());
        //LOG(INFO) << "it.key(): " << it.key() << ", nested_name: " << nested_name;

        // use prefix lookup to prune non-matching sub-trees early
        auto prefix_it = include_names.equal_prefix_range(nested_name);
        if(!include_names.empty() && prefix_it.first == prefix_it.second) {
            // prefix not found in allowed list of highlight field names, so can trim early
            it = doc.erase(it);
            continue ;
        }

        if(exclude_names.count(nested_name) != 0) {
            it = doc.erase(it);
            continue ;
        }

        if(exclude_names.empty() && !include_names.empty() && include_names.count(nested_name) != 0) {
            // without exclusions, we can pick the sub-tree early if parent name is found in include names
            it++;
            continue;
        }

        if(it.value().is_object()) {
            bool is_orig_empty = it.value().empty();
            prune_doc(it.value(), include_names, exclude_names, nested_name, depth+1);
            // drop the object only if pruning emptied it (and it wasn't empty before)
            if(!is_orig_empty && it.value().empty()) {
                it = doc.erase(it);
            } else {
                it++;
            }

            continue;
        } else if(it.value().is_array()) {
            bool orig_array_empty = it.value().empty();
            bool primitive_array = true;

            auto arr_it = it.value().begin();
            while(arr_it != it.value().end()) {
                // NOTE: we will not support array of array of nested objects
                primitive_array = primitive_array && !arr_it.value().is_object();
                if(arr_it.value().is_object()) {
                    bool orig_ele_empty = arr_it.value().empty();
                    prune_doc(arr_it.value(), include_names, exclude_names, nested_name, depth+1);
                    // don't remove empty array objects to help frontend
                }

                arr_it++;
            }

            if(!orig_array_empty && it.value().empty()) {
                // only drop field if array became empty because of pruning (and not empty already)
                it = doc.erase(it);
                continue;
            }

            if(!primitive_array) {
                it++;
                continue;
            }
        }

        if(!include_names.empty() && include_names.count(nested_name) == 0) {
            // at this point, name should match fully, otherwise we should erase the value
            it = doc.erase(it);
            continue;
        }

        it++;
    }

    return Join::include_references(doc, seq_id, collection, reference_filter_results,
                                    ref_include_exclude_fields_vec, original_doc);
}

Option<bool>
Collection::validate_alter_payload(nlohmann::json& schema_changes, std::vector<field>& addition_fields,
                                   std::vector<field>& reindex_fields, std::vector<field>& del_fields,
                                   std::vector<field>& update_fields, std::string& fallback_field_type) {
    if(!schema_changes.is_object()) {
        return Option<bool>(400, "Bad JSON.");
    }

    if(schema_changes.size() != 1) {
        return Option<bool>(400, "Only `fields` and `metadata` can be updated at the moment.");
    }

    const std::string err_msg = "The `fields` value should be an array of objects containing "
                                "the field `name` and other properties.";

    if((!schema_changes.contains("fields") || !schema_changes["fields"].is_array() || schema_changes["fields"].empty())) {
        return Option<bool>(400, err_msg);
    }

    // basic validation of fields
    std::vector<field> diff_fields;
    tsl::htrie_map<char, field> updated_search_schema = search_schema;
    tsl::htrie_map<char, field> updated_nested_fields = nested_fields;
    tsl::htrie_map<char, field> updated_embedding_fields = embedding_fields;
    size_t num_auto_detect_fields = 0;

    // since fields can be deleted and added in the same change set,
    // we will first do a pass at basic validations and pick out fields to be deleted
    std::set<std::string> delete_field_names;

    // ensure that drop values are at the top: required for drop+add use case
    std::sort(schema_changes["fields"].begin(), schema_changes["fields"].end(),
              [](nlohmann::json& a, nlohmann::json& b) {
                  return a.contains("drop") > b.contains("drop");
              });

    for(const auto& kv: schema_changes["fields"].items()) {
        if (!kv.value().is_object()) {
            return Option<bool>(400, err_msg);
        }

        if (!kv.value().contains("name")) {
            return Option<bool>(400, err_msg);
        }

        const std::string& field_name = kv.value()["name"].get<std::string>();
        if(field_name == "id") {
            return Option<bool>(400, "Field `" + field_name + "` cannot be altered.");
        }

        if(kv.value().contains("drop")) {
            delete_field_names.insert(field_name);
        }
    }

    std::unordered_map<std::string, field> new_dynamic_fields;
    int json_array_index = -1;

    for(const auto& kv: schema_changes["fields"].items()) {
        json_array_index++;
        const std::string& field_name = kv.value()["name"].get<std::string>();
        const auto& field_it = search_schema.find(field_name);
        auto found_field = (field_it != search_schema.end());

        auto dyn_field_it = dynamic_fields.find(field_name);
        auto found_dyn_field = (dyn_field_it != dynamic_fields.end());

        if(kv.value().contains("drop")) {
            if(!kv.value()["drop"].is_boolean() || !kv.value()["drop"].get<bool>()) {
                return Option<bool>(400, "Field `" + field_name + "` must have a drop value of `true`.");
            }

            if(field_name == ".*") {
                del_fields.emplace_back(".*", field_types::AUTO, false);
                continue;
            }

            if(!found_field && !found_dyn_field) {
                return Option<bool>(400, "Field `" + field_name + "` is not part of collection schema.");
            }

            if(found_field && field_it.value().embed.count(fields::from) != 0) {
                updated_embedding_fields.erase(field_it.key());
            }

            if(found_field) {
                del_fields.push_back(field_it.value());
                updated_search_schema.erase(field_it.key());
                updated_nested_fields.erase(field_it.key());

                if(field_it.value().embed.count(fields::from) != 0) {
                    updated_embedding_fields.erase(field_it.key());
                }

                // should also remove children if the field being dropped is an object
                if(field_it.value().nested && enable_nested_fields) {
                    auto prefix_it = search_schema.equal_prefix_range(field_name);
                    for(auto prefix_kv = prefix_it.first; prefix_kv != prefix_it.second; ++prefix_kv) {
                        bool exact_key_match = (prefix_kv.key().size() == field_name.size());
                        if(!exact_key_match) {
                            del_fields.push_back(prefix_kv.value());
                            updated_search_schema.erase(prefix_kv.key());
                            updated_nested_fields.erase(prefix_kv.key());

                            if(prefix_kv.value().embed.count(fields::from) != 0) {
                                updated_embedding_fields.erase(prefix_kv.key());
                            }
                        }
                    }
                }
            }

            // NOTE: fields with type "auto" or "string*" will exist in both `search_schema` and `dynamic_fields`
            if(found_dyn_field) {
                del_fields.push_back(dyn_field_it->second);

                // we will also have to resolve the actual field names which match the dynamic field pattern
                // PERF: compile the pattern once instead of re-constructing std::regex
                // on every iteration of the schema scan below.
                const std::regex dyn_field_regex(dyn_field_it->first);
                for(auto& a_field: search_schema) {
                    if(std::regex_match(a_field.name, dyn_field_regex)) {
                        del_fields.push_back(a_field);
                        // if schema contains explicit fields that match dynamic field that're going to be removed,
                        // we will have to remove them from the schema so that validation can occur properly
                        updated_search_schema.erase(a_field.name);
                    }
                }
            }
        } else {
            // add or update existing field
            auto is_addition = (!found_field && !found_dyn_field);
            auto is_reindex = (delete_field_names.count(field_name) != 0);

            if(is_addition && is_reindex) {
                return Option<bool>(400, "Field `" + field_name + "` cannot be added and deleted at the same time.");
            }

            if(is_addition || is_reindex) {
                // must validate fields
                auto parse_op = field::json_field_to_field(enable_nested_fields, kv.value(), diff_fields,
                                                           fallback_field_type, num_auto_detect_fields);
                if (!parse_op.ok()) {
                    return parse_op;
                }

                auto& f = diff_fields.back();

                if(f.is_dynamic()) {
                    new_dynamic_fields[f.name] = f;
                } else {
                    updated_search_schema[f.name] = f;
                }

                if(!f.embed.empty()) {
                    auto validate_res = field::validate_and_init_embed_field(search_schema, schema_changes["fields"][json_array_index],
                                                                             schema_changes["fields"], f);
                    if(!validate_res.ok()) {
                        return validate_res;
                    }
                }

                if(is_reindex) {
                    reindex_fields.push_back(f);
                } else {
                    addition_fields.push_back(f);
                }

                if(f.embed.count(fields::from) != 0) {
                    embedding_fields.emplace(f.name, f);
                }

                if(f.nested && enable_nested_fields) {
                    check_and_add_nested_field(updated_nested_fields, f);

                    // should also add children if the field is an object
                    auto prefix_it = search_schema.equal_prefix_range(field_name);
                    for(auto prefix_kv = prefix_it.first; prefix_kv != prefix_it.second; ++prefix_kv) {
                        bool exact_key_match = (prefix_kv.key().size() == field_name.size());
                        if(!exact_key_match) {
                            updated_search_schema.emplace(prefix_kv.key(), prefix_kv.value());
                            check_and_add_nested_field(updated_nested_fields, prefix_kv.value());
                            if(prefix_kv.value().embed.count(fields::from) != 0) {
                                embedding_fields.emplace(prefix_kv.key(), prefix_kv.value());
                            }

                            if(is_reindex) {
                                reindex_fields.push_back(prefix_kv.value());
                            } else {
                                addition_fields.push_back(prefix_kv.value());
                            }
                        }
                    }
                }
            } else if (found_field && field_it->embed.count(fields::from) != 0) {
                // embedded field, only api key updation is supported
                if(!kv.value().contains(fields::embed) || !kv.value()[fields::embed].is_object()) {
                    return Option<bool>(400, "Missing or bad `embed` param.");
                }

                if (!kv.value()[fields::embed].contains(fields::model_config) ||
                    !kv.value()[fields::embed][fields::model_config].is_object()) {
                    return Option<bool>(400, "`model_config` should be an object containing `model_name` and `api_key`.");
                }

                const auto &model_config = kv.value()[fields::embed][fields::model_config];
                if (!model_config.contains(fields::model_name) || !model_config.contains(fields::api_key) ||
                    !model_config[fields::model_name].is_string() || !model_config[fields::api_key].is_string()) {
                    return Option<bool>(400, "`model_config` should be an object containing `model_name` and `api_key` as string values.");
                }

                field f(field_name, field_it->type, field_it->facet);
                f.embed = kv.value()[fields::embed];
                update_fields.push_back(f);
            } else {
                // partial update is not supported for now
                return Option<bool>(400, "Field `" + field_name + "` is already part of the schema: To "
                                         "change this field, drop it first before adding it back to the schema.");
            }
        }
    }

    if(num_auto_detect_fields > 1) {
        return Option<bool>(400, "There can be only one field named `.*`.");
    }

    // data validations: here we ensure that already stored data is compatible with requested schema changes
    const std::string seq_id_prefix = get_seq_id_collection_prefix();
    std::string upper_bound_key = get_seq_id_collection_prefix() + "`";  // cannot inline this
    rocksdb::Slice upper_bound(upper_bound_key);
    rocksdb::Iterator* iter = store->scan(seq_id_prefix, &upper_bound);
    std::unique_ptr<rocksdb::Iterator> iter_guard(iter);

    size_t num_found_docs = 0;

    auto begin = std::chrono::high_resolution_clock::now();

    while(iter->Valid() && iter->key().starts_with(seq_id_prefix)) {
        num_found_docs++;
        const uint32_t seq_id = Collection::get_seq_id_from_key(iter->key().ToString());

        nlohmann::json document;

        try {
            document = nlohmann::json::parse(iter->value().ToString());
        } catch(const std::exception& e) {
            return Option<bool>(400, "Bad JSON in document: " + document.dump(-1, ' ', false, nlohmann::detail::error_handler_t::ignore));
        }

        if(!fallback_field_type.empty() || !new_dynamic_fields.empty() || !updated_nested_fields.empty()) {
            // discover fields that would be auto-created by the new schema
            std::vector<field> new_fields;
            Option<bool> new_fields_op = detect_new_fields(document, DIRTY_VALUES::DROP,
                                                          updated_search_schema, new_dynamic_fields,
                                                          updated_nested_fields,
                                                          fallback_field_type, false,
                                                          new_fields,
                                                          enable_nested_fields,
                                                          reference_fields, object_reference_helper_fields);

            if(!new_fields_op.ok()) {
                return new_fields_op;
            }

            for(auto& new_field: new_fields) {
                if(updated_search_schema.find(new_field.name) == updated_search_schema.end()) {
                    if(new_field.nested) {
                        auto del_field_it = std::find_if(del_fields.begin(), del_fields.end(),
                                                         [&new_field](const field& f) {
                                                             return f.name == new_field.name;
                                                         });
                        auto re_field_it = std::find_if(reindex_fields.begin(), reindex_fields.end(),
                                                        [&new_field](const field& f) {
                                                            return f.name == new_field.name;
                                                        });

                        if(del_field_it != del_fields.end() && re_field_it == reindex_fields.end()) {
                            // If the discovered field is already being deleted and is not part of reindex fields,
                            // we should ignore. This can happen when we are trying to drop a nested object's child.
continue; } } reindex_fields.push_back(new_field); updated_search_schema[new_field.name] = new_field; } } } // validate existing data on disk for compatibility via updated_search_schema auto validate_op = validator_t::validate_index_in_memory(document, seq_id, default_sorting_field, updated_search_schema, updated_embedding_fields, index_operation_t::CREATE, false, fallback_field_type, DIRTY_VALUES::COERCE_OR_REJECT); if(!validate_op.ok()) { std::string err_message = validate_op.error(); // we've to message the error message to suite the schema alter context if(err_message.find("but is not found in the document.") != std::string::npos) { // missing field err_message.pop_back(); // delete trailing dot err_message += "s already present in the collection. If you still want to add this field, " "set it as `optional: true`."; return Option<bool>(validate_op.code(), err_message); } else if(err_message.find("must be") != std::string::npos) { // type of an already stored document conflicts with new schema std::string type_error = "Schema change is incompatible with the type of documents already stored " "in this collection."; std::vector<std::string> err_parts; StringUtils::split(err_message, err_parts, "must be"); if(err_parts.size() == 2) { err_parts[0][0] = std::tolower(err_parts[0][0]); type_error += " Existing data for " + err_parts[0] + " cannot be coerced into " + err_parts[1]; } return Option<bool>(validate_op.code(), type_error); } else { std::string schema_err = "Schema change is incompatible with the type of documents already stored " "in this collection. 
error: " + validate_op.error(); return Option<bool>(validate_op.code(), schema_err); } } if(num_found_docs % ((1 << 14)) == 0) { // having a cheaper higher layer check to prevent checking clock too often auto time_elapsed = std::chrono::duration_cast<std::chrono::seconds>( std::chrono::high_resolution_clock::now() - begin).count(); if(time_elapsed > 30) { begin = std::chrono::high_resolution_clock::now(); LOG(INFO) << "Verified " << num_found_docs << " so far."; } } iter->Next(); } return Option<bool>(true); } Option<bool> Collection::resolve_field_type(field& new_field, nlohmann::detail::iter_impl<nlohmann::basic_json<>>& kv, nlohmann::json& document, const DIRTY_VALUES& dirty_values, const bool found_dynamic_field, const std::string& fallback_field_type, const bool enable_nested_fields, std::vector<field>& new_fields) { if(!new_field.index) { return Option<bool>(true); } // Type detection scenarios: // a) Not a dynamic field + fallback type is explicit: use fallback type // b) Dynamic field + type is explicit: use explicit type // c) Not a dynamic field + fallback type is auto: detect and assign type // d) Dynamic field + type is auto: detect and assign type // e) Not a dynamic field + fallback type is string*: map to string/string[] // f) Dynamic field + type is string*: map to string/string[] const std::string& test_field_type = found_dynamic_field ? 
new_field.type : fallback_field_type; if(test_field_type == field_types::AUTO || field_types::is_string_or_array(test_field_type)) { if(kv.key() == ".*") { return Option<bool>(true); } std::string field_type; bool parseable = field::get_type(kv.value(), field_type); if(!parseable) { if(kv.value().is_null() && new_field.optional) { // null values are allowed only if field is optional kv = document.erase(kv); return Option<bool>(false); } if(kv.value().is_object()) { return Option<bool>(true); } if(kv.value().is_array() && kv.value().empty()) { return Option<bool>(true); } if(dirty_values == DIRTY_VALUES::REJECT || dirty_values == DIRTY_VALUES::COERCE_OR_REJECT) { return Option<bool>(400, "Type of field `" + kv.key() + "` is invalid."); } else { // DROP or COERCE_OR_DROP kv = document.erase(kv); return Option<bool>(false); } } if(test_field_type == field_types::AUTO) { new_field.type = field_type; if(new_field.is_object()) { new_field.nested = true; } } else { if (kv.value().is_array()) { new_field.type = field_types::STRING_ARRAY; } else { new_field.type = field_types::STRING; } } } else { new_field.type = test_field_type; } if (new_field.is_num_sort_field()) { // only numerical fields are added to sort index in dynamic type detection new_field.sort = true; } if(enable_nested_fields || !new_field.nested) { // only detect nested field if it is enabled explicitly new_fields.emplace_back(new_field); } return Option<bool>(true); } Option<bool> Collection::detect_new_fields(nlohmann::json& document, const DIRTY_VALUES& dirty_values, const tsl::htrie_map<char, field>& schema, const std::unordered_map<std::string, field>& dyn_fields, tsl::htrie_map<char, field>& nested_fields, const std::string& fallback_field_type, bool is_update, std::vector<field>& new_fields, const bool enable_nested_fields, const spp::sparse_hash_map<std::string, reference_info_t>& reference_fields, tsl::htrie_set<char>& object_reference_helper_fields) { auto kv = document.begin(); while(kv != 
document.end()) { // we will not index the special "id" key if (schema.count(kv.key()) == 0 && kv.key() != "id") { const std::string &fname = kv.key(); field new_field(fname, field_types::STRING, false, true); bool found_dynamic_field = false; bool skip_field = false; // check against dynamic field definitions for(auto dyn_field_it = dyn_fields.begin(); dyn_field_it != dyn_fields.end(); dyn_field_it++) { auto& dynamic_field = dyn_field_it->second; if(std::regex_match (kv.key(), std::regex(dynamic_field.name))) { // to prevent confusion we also disallow dynamic field names that contain ".*" if((kv.key() != ".*" && kv.key().find(".*") != std::string::npos)) { skip_field = true; break; } new_field = dynamic_field; new_field.name = fname; found_dynamic_field = true; if(kv->is_object() && dynamic_field.name.find(".*") == kv.key().size()) { // e.g. { name => price.*, type: float } to match price.USD, price.UK etc. // top-level price field should be treated as type `object` and NOT `float`. new_field.nested = true; new_field.type = field_types::OBJECT; new_field.sort = false; } break; } } if(skip_field) { kv++; continue; } if(!found_dynamic_field && fallback_field_type.empty()) { // we will not auto detect schema for non-dynamic fields if auto detection is not enabled kv++; continue; } auto add_op = resolve_field_type(new_field, kv, document, dirty_values, found_dynamic_field, fallback_field_type, enable_nested_fields, new_fields); if(!add_op.ok()) { return add_op; } bool increment_iter = add_op.get(); if(!increment_iter) { continue; } } kv++; } if(enable_nested_fields) { for(auto& new_field: new_fields) { if(new_field.nested) { check_and_add_nested_field(nested_fields, new_field); } } std::vector<field> flattened_fields; auto flatten_op = field::flatten_doc(document, nested_fields, dyn_fields, is_update, flattened_fields); if(!flatten_op.ok()) { return flatten_op; } for(const auto& flattened_field: flattened_fields) { if(schema.find(flattened_field.name) == schema.end()) 
{ new_fields.push_back(flattened_field); check_and_add_nested_field(nested_fields, flattened_field); } } } auto add_reference_helper_fields_op = Join::add_reference_helper_fields(document, schema, reference_fields, object_reference_helper_fields, is_update); if (!add_reference_helper_fields_op.ok()) { return add_reference_helper_fields_op; } return Option<bool>(true); } Index* Collection::init_index() { for(const field& field: fields) { if(field.is_dynamic()) { // regexp fields and fields with auto type are treated as dynamic fields dynamic_fields.emplace(field.name, field); continue; } if(field.name == ".*") { continue; } search_schema.emplace(field.name, field); if(field.nested) { check_and_add_nested_field(nested_fields, field); } if(field.embed.count(fields::from) != 0) { embedding_fields.emplace(field.name, field); } if(!field.reference.empty()) { auto dot_index = field.reference.find('.'); auto ref_coll_name = field.reference.substr(0, dot_index); auto ref_field_name = field.reference.substr(dot_index + 1); auto& collectionManager = CollectionManager::get_instance(); auto ref_coll = collectionManager.get_collection(ref_coll_name); if (ref_coll != nullptr) { // `CollectionManager::get_collection` accounts for collection alias being used and provides pointer to // the original collection. ref_coll_name = ref_coll->name; ref_coll->add_referenced_in(name, field.name, field.is_async_reference, ref_field_name); } else { // Reference collection has not been created yet. 
collectionManager.add_referenced_in_backlog(ref_coll_name, reference_info_t{name, field.name, field.is_async_reference, ref_field_name}); } reference_fields.emplace(field.name, reference_info_t(ref_coll_name, ref_field_name, field.is_async_reference)); if (field.nested) { object_reference_helper_fields.insert(field.name); } } } synonym_index = new SynonymIndex(store); return new Index(name+std::to_string(0), collection_id, store, synonym_index, CollectionManager::get_instance().get_thread_pool(), search_schema, symbols_to_index, token_separators); } DIRTY_VALUES Collection::parse_dirty_values_option(std::string& dirty_values) const { std::shared_lock lock(mutex); StringUtils::toupper(dirty_values); auto dirty_values_op = magic_enum::enum_cast<DIRTY_VALUES>(dirty_values); DIRTY_VALUES dirty_values_action; if(dirty_values_op.has_value()) { dirty_values_action = dirty_values_op.value(); } else { dirty_values_action = (fallback_field_type.empty() && dynamic_fields.empty()) ? DIRTY_VALUES::REJECT : DIRTY_VALUES::COERCE_OR_REJECT; } return dirty_values_action; } std::vector<char> Collection::to_char_array(const std::vector<std::string>& strings) { std::vector<char> vec; for(const auto& s: strings) { if(s.length() == 1) { vec.push_back(s[0]); } } return vec; } std::vector<char> Collection::get_symbols_to_index() { return symbols_to_index; } std::vector<char> Collection::get_token_separators() { return token_separators; } std::string Collection::get_fallback_field_type() { return fallback_field_type; } bool Collection::get_enable_nested_fields() { return enable_nested_fields; } Option<bool> Collection::parse_facet(const std::string& facet_field, std::vector<facet>& facets) const { const std::string _alpha = "_alpha"; bool top_k = false; std::string facet_field_name, param_str; bool paran_open = false; //for ( bool brace_open = false; //for [ std::string order = ""; bool sort_alpha = false; std::string sort_field = ""; bool colon_found = false; bool top_k_found = false; 
bool sort_found = false; unsigned facet_param_count = 0; unsigned commaCount = 0; bool is_wildcard = false; std::vector<std::tuple<int64_t, int64_t, std::string>> tupVec; for(int i = 0; i < facet_field.size(); ) { if(facet_field[i] == '(') { //facet field name complete, check validity if(search_schema.count(facet_field_name) == 0 || !search_schema.at(facet_field_name).facet) { std::string error = "Could not find a facet field named `" + facet_field_name + "` in the schema."; return Option<bool>(404, error); } paran_open = true; i++; continue; } else if(facet_field[i] == '*') { if(i == facet_field.size() - 1) { auto prefix = facet_field.substr(0, facet_field.size() - 1); auto pair = search_schema.equal_prefix_range(prefix); if(pair.first == pair.second) { // not found std::string error = "Could not find a facet field for `" + facet_field + "` in the schema."; return Option<bool>(404, error); } // Collect the fields that match the prefix and are marked as facet. for(auto field = pair.first; field != pair.second; field++) { if(field->facet) { facets.emplace_back(facet(field->name, facets.size())); facets.back().is_wildcard_match = true; } } i++; is_wildcard = true; continue; } else { return Option<bool>(404, "Only prefix matching with a wildcard is allowed."); } } else if(facet_field[i] == ')') { if(paran_open == true && (facet_param_count == commaCount + 1)) { if(!colon_found && !top_k_found) { return Option<bool>(400, "Invalid facet param `" + param_str + "`."); } paran_open = false; commaCount = facet_param_count; break; } else { return Option<bool>(400, "Invalid facet format."); } } else if(facet_field[i] == ':') { if(paran_open == false || facet_param_count != commaCount) { return Option<bool>(400, "Invalid facet format."); } colon_found = true; StringUtils::trim(param_str); if(param_str == "sort_by") { //sort_by params sort_found = true; for(i; facet_field.size(); i++) { if(facet_field[i] == ',' || facet_field[i] == ')') { break; } else { 
param_str+=facet_field[i]; } } std::vector<std::string> tokens; StringUtils::split(param_str, tokens, ":"); if(tokens.size() != 3) { std::string error = "Invalid sort format."; return Option<bool>(400, error); } if(tokens[1] == _alpha) { const field& a_field = search_schema.at(facet_field_name); if(!a_field.is_string()) { std::string error = "Facet field should be string type to apply alpha sort."; return Option<bool>(400, error); } sort_alpha = true; } else { //sort_field based sort sort_field = tokens[1]; if(search_schema.count(sort_field) == 0 || !search_schema.at(sort_field).facet) { std::string error = "Could not find a facet field named `" + sort_field + "` in the schema."; return Option<bool>(404, error); } const field& a_field = search_schema.at(sort_field); if(a_field.is_string()) { std::string error = "Sort field should be non string type to apply sort."; return Option<bool>(400, error); } } if(tokens[2] == "asc") { order = "asc"; } else if(tokens[2] == "desc") { order = "desc"; } else { std::string error = "Invalid sort param."; return Option<bool>(400, error); } facet_param_count++; } else if(param_str == "top_k") { //top_k param top_k_found = true; param_str.clear(); i++; //skip : for(i; i < facet_field.size(); i++) { if(facet_field[i] == ',' || facet_field[i] == ')') { break; } param_str+=facet_field[i]; } if(param_str.empty() || (param_str != "true" && param_str != "false")) { return Option<bool>(400, "top_k string format is invalid."); } if(param_str == "true") { top_k = true; } facet_param_count++; } else if((i + 1) < facet_field.size() && facet_field[i+1] == '[') { //range params const field& a_field = search_schema.at(facet_field_name); if(tupVec.empty()) { if(!a_field.is_integer() && !a_field.is_float()) { std::string error = "Range facet is restricted to only integer and float fields."; return Option<bool>(400, error); } if(!a_field.sort) { return Option<bool>(400, "Range facets require sort enabled for the field."); } } auto range_val = 
param_str; StringUtils::trim(range_val); if(range_val.empty()) { return Option<bool>(400, "Facet range value is not valid."); } std::string lower, upper; int64_t lower_range, upper_range; brace_open = true; auto commaFound = 0; i+=2; //skip : and [ param_str.clear(); while(i < facet_field.size()) { if(facet_field[i]== ',') { if(commaFound == 1) { return Option<bool>(400, "Error splitting the facet range values."); } lower = param_str; StringUtils::trim(lower); param_str.clear(); commaFound++; } else if(facet_field[i] == ']') { brace_open = false; upper = param_str; StringUtils::trim(upper); i++; //skip ] and break loop break; } else if(facet_field[i] == ')') { return Option<bool>(400, "Error splitting the facet range values."); } else { param_str += facet_field[i]; } i++; } if(lower.empty()) { lower_range = INT64_MIN; } else if(a_field.is_integer() && StringUtils::is_int64_t(lower)) { lower_range = std::stoll(lower); } else if(a_field.is_float() && StringUtils::is_float(lower)) { float val = std::stof(lower); lower_range = Index::float_to_int64_t(val); } else { return Option<bool>(400, "Facet range value is not valid."); } if(upper.empty()) { upper_range = INT64_MAX; } else if(a_field.is_integer() && StringUtils::is_int64_t(upper)) { upper_range = std::stoll(upper); } else if(a_field.is_float() && StringUtils::is_float(upper)) { float val = std::stof(upper); upper_range = Index::float_to_int64_t(val); } else { return Option<bool>(400, "Facet range value is not valid."); } tupVec.emplace_back(lower_range, upper_range, range_val); facet_param_count++; } else { return Option<bool>(400, "Invalid facet param `" + param_str + "`."); } continue; } else if(facet_field[i] == ',') { param_str.clear(); commaCount++; i++; continue; } if(!paran_open) { facet_field_name+=facet_field[i]; } else { param_str+=facet_field[i]; } i++; } if(paran_open || brace_open || facet_param_count != commaCount) { return Option<bool>(400, "Invalid facet format."); } if(facet_param_count == 0 && 
!is_wildcard) { //facets with params will be validated while parsing // for normal facets need to perform check if(search_schema.count(facet_field_name) == 0 || !search_schema.at(facet_field_name).facet) { std::string error = "Could not find a facet field named `" + facet_field_name + "` in the schema."; return Option<bool>(404, error); } } if(!tupVec.empty()) { //add range facets sort(tupVec.begin(), tupVec.end()); facet a_facet(facet_field_name, facets.size()); auto& range_map = a_facet.facet_range_map; for(const auto& tup: tupVec) { const auto& lower_range = std::get<0>(tup); const auto& upper_range = std::get<1>(tup); const std::string& range_val = std::get<2>(tup); //check if ranges are continous or not if((!range_map.empty()) && (range_map.find(lower_range) == range_map.end())) { std::string error = "Ranges in range facet syntax should be continous."; return Option<bool>(400, error); } range_map[upper_range] = range_specs_t{range_val, lower_range}; } a_facet.is_range_query = true; a_facet.is_top_k = top_k; facets.emplace_back(std::move(a_facet)); } else if(!is_wildcard) { //add other facet types, wildcard facets are already added while parsing facets.emplace_back(facet(facet_field_name, facets.size(), top_k, {}, false, sort_alpha, order, sort_field)); } return Option<bool>(true); } Option<bool> Collection::populate_include_exclude_fields(const spp::sparse_hash_set<std::string>& include_fields, const spp::sparse_hash_set<std::string>& exclude_fields, tsl::htrie_set<char>& include_fields_full, tsl::htrie_set<char>& exclude_fields_full) const { std::vector<std::string> include_fields_vec; std::vector<std::string> exclude_fields_vec; for(auto& f_name: include_fields) { auto field_op = extract_field_name(f_name, search_schema, include_fields_vec, false, enable_nested_fields, true, true); if(!field_op.ok()) { if(field_op.code() == 404) { // field need not be part of schema to be included (could be a stored value in the doc) include_fields_vec.push_back(f_name); 
continue; } return Option<bool>(field_op.code(), field_op.error()); } } for(auto& f_name: exclude_fields) { if(f_name == "out_of") { // `out_of` is strictly a meta-field, but we handle it since it's useful continue; } if(f_name == "conversation_history") { continue; } auto field_op = extract_field_name(f_name, search_schema, exclude_fields_vec, false, enable_nested_fields, true, true); if(!field_op.ok()) { if(field_op.code() == 404) { // field need not be part of schema to be excluded (could be a stored value in the doc) exclude_fields_vec.push_back(f_name); continue; } return Option<bool>(field_op.code(), field_op.error()); } } for(auto& f_name: include_fields_vec) { include_fields_full.insert(f_name); } for(auto& f_name: exclude_fields_vec) { exclude_fields_full.insert(f_name); } return Option<bool>(true); } Option<bool> Collection::populate_include_exclude_fields_lk(const spp::sparse_hash_set<std::string>& include_fields, const spp::sparse_hash_set<std::string>& exclude_fields, tsl::htrie_set<char>& include_fields_full, tsl::htrie_set<char>& exclude_fields_full) const { std::shared_lock lock(mutex); return populate_include_exclude_fields(include_fields, exclude_fields, include_fields_full, exclude_fields_full); } // Removes the dropped field from embed_from of all embedding fields. 
// Strips `del_field` out of the `embed_from` list of every embedding field.
// Embedding fields left with an empty `embed_from` are collected into
// `garbage_embed_fields` and then fully removed from the collection schema.
void Collection::process_remove_field_for_embedding_fields(const field& del_field,
                                                           std::vector<field>& garbage_embed_fields) {
    for(auto& field : fields) {
        // only embedding fields carry an `embed.from` list
        if(field.embed.count(fields::from) == 0) {
            continue;
        }

        // Remove `del_field.name` from this field's embed_from array in place.
        bool found_field = false;
        nlohmann::json& embed_from_names = field.embed[fields::from];
        for(auto it = embed_from_names.begin(); it != embed_from_names.end();) {
            if(it.value() == del_field.name) {
                it = embed_from_names.erase(it);
                found_field = true;
            } else {
                it++;
            }
        }

        if(found_field) {
            // mark this embedding field as "garbage" if it has no more embed_from fields
            if(embed_from_names.empty()) {
                garbage_embed_fields.push_back(field);
            } else {
                // the dropped field was present in `embed_from`, so we have to update the field objects
                field.embed[fields::from] = embed_from_names;
                embedding_fields[field.name].embed[fields::from] = embed_from_names;
            }
        }
    }

    // Purge the now-empty embedding fields from every schema structure.
    for(auto& garbage_field: garbage_embed_fields) {
        remove_embedding_field(garbage_field.name);
        search_schema.erase(garbage_field.name);
        fields.erase(std::remove_if(fields.begin(), fields.end(), [&garbage_field](const auto &f) {
            return f.name == garbage_field.name;
        }), fields.end());
    }
}

// Masks the value stored under `credential_name` in `json` so API keys are not
// leaked in schema/debug output. No-op when the key is absent.
void Collection::hide_credential(nlohmann::json& json, const std::string& credential_name) {
    if(json.count(credential_name) != 0) {
        // hide api key with * except first 5 chars
        std::string credential_name_str = json[credential_name];
        if(credential_name_str.size() > 5) {
            size_t num_chars_to_replace = credential_name_str.size() - 5;
            json[credential_name] = credential_name_str.replace(5, num_chars_to_replace, num_chars_to_replace, '*');
        } else {
            // short values are replaced wholesale with a fixed-length mask so the
            // original length is not revealed either
            json[credential_name] = "***********";
        }
    }
}

// Deletes all documents that fall outside the top-k ranking of `field_name`.
// Removal failures are logged and skipped rather than aborting the sweep.
Option<bool> Collection::truncate_after_top_k(const string &field_name, size_t k) {
    std::shared_lock slock(mutex);
    std::vector<uint32_t> seq_ids;
    auto op = index->seq_ids_outside_top_k(field_name, k, seq_ids);
    // release the read lock before removals: remove_if_found() acquires locks itself
    slock.unlock();

    if(!op.ok()) {
        return op;
    }

    for(auto seq_id: seq_ids) {
        auto remove_op = remove_if_found(seq_id);
        if(!remove_op.ok()) {
            LOG(ERROR) << "Error while truncating top k: " << remove_op.error();
        }
    }

    return Option<bool>(true);
}

// Thread-safe pass-through to Index::populate_sort_mapping_with_lock, used when
// another (referencing) collection needs this collection's sort mappings.
Option<bool> Collection::reference_populate_sort_mapping(int *sort_order, std::vector<size_t> &geopoint_indices,
                                                         std::vector<sort_by> &sort_fields_std,
                                                         std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32> *, 3> &field_values) const {
    std::shared_lock lock(mutex);
    return index->populate_sort_mapping_with_lock(sort_order, geopoint_indices, sort_fields_std, field_values);
}

// Thread-safe pass-through: sort score of a string field value for `seq_id`.
int64_t Collection::reference_string_sort_score(const string &field_name, const uint32_t& seq_id) const {
    std::shared_lock lock(mutex);
    return index->reference_string_sort_score(field_name, seq_id);
}

// Returns true if any field of `collection_name` references this collection.
bool Collection::is_referenced_in(const std::string& collection_name) const {
    std::shared_lock lock(mutex);
    return referenced_in.count(collection_name) > 0;
}

// Registers a batch of incoming references (collections whose fields point at
// this collection). Entries whose referenced field is missing from the schema
// are logged and skipped. "id" is always accepted as a referenceable field.
// NOTE(review): mutates referenced_in / async_referenced_ins while holding only
// a shared (read) lock — looks like it should be a unique_lock; confirm intended.
void Collection::add_referenced_ins(const std::set<reference_info_t>& ref_infos) {
    std::shared_lock lock(mutex);
    for (const auto &ref_info: ref_infos) {
        auto const& referenced_field_name = ref_info.referenced_field_name;
        auto it = search_schema.find(referenced_field_name);
        if (referenced_field_name != "id" && it == search_schema.end()) {
            LOG(ERROR) << "Field `" << referenced_field_name << "` not found in the collection `" << name
                       << "` which is referenced in `" << ref_info.collection << "." << ref_info.field + "`.";
            continue;
        }

        referenced_in.emplace(ref_info.collection, ref_info.field);
        if (ref_info.is_async) {
            async_referenced_ins[referenced_field_name].emplace(ref_info.collection, ref_info.field);
        }
    }
}

// Registers a single incoming reference: `collection_name.field_name` points at
// `referenced_field_name` of this collection. Logs and bails out if the
// referenced field does not exist in the schema (unless it is "id").
// NOTE(review): same shared-lock-while-mutating pattern as add_referenced_ins —
// confirm a unique_lock is not required here.
void Collection::add_referenced_in(const std::string& collection_name, const std::string& field_name,
                                   const bool& is_async, const std::string& referenced_field_name) {
    std::shared_lock lock(mutex);
    auto it = search_schema.find(referenced_field_name);
    if (referenced_field_name != "id" && it == search_schema.end()) {
        LOG(ERROR) << "Field `" << referenced_field_name << "` not found in the collection `" << name
                   << "` which is referenced in `" << collection_name << "." << field_name + "`.";
        return;
    }

    referenced_in.emplace(collection_name, field_name);
    if (is_async) {
        async_referenced_ins[referenced_field_name].emplace(collection_name, field_name);
    }
}

// Lock-acquiring wrapper around get_referenced_in_field().
Option<std::string> Collection::get_referenced_in_field_with_lock(const std::string& collection_name) const {
    std::shared_lock lock(mutex);
    return get_referenced_in_field(collection_name);
}

// Returns the name of the field in this collection that references
// `collection_name`, or a 400 error if no such reference exists.
// Caller must hold `mutex`.
Option<std::string> Collection::get_referenced_in_field(const std::string& collection_name) const {
    if (referenced_in.count(collection_name) == 0) {
        return Option<std::string>(400, "Could not find any field in `" + name + "` referencing the collection `"
                                        + collection_name + "`.");
    }
    return Option<std::string>(referenced_in.at(collection_name));
}

// Thread-safe pass-through: collects ids related to `seq_id` via `field_name`.
Option<bool> Collection::get_related_ids_with_lock(const std::string& field_name, const uint32_t& seq_id,
                                                   std::vector<uint32_t>& result) const {
    std::shared_lock lock(mutex);
    return index->get_related_ids(field_name, seq_id, result);
}

// Thread-safe pass-through: sort-index value of `field_name` for `seq_id`.
Option<uint32_t> Collection::get_sort_index_value_with_lock(const std::string& field_name,
                                                            const uint32_t& seq_id) const {
    std::shared_lock lock(mutex);
    return index->get_sort_index_value_with_lock(field_name, seq_id);
}

// Exposes the collection lifecycle mutex (used by callers coordinating
// create/drop against in-flight operations).
std::shared_mutex& Collection::get_lifecycle_mutex() {
    return lifecycle_mutex;
}

// Removes `field_name` from the embedding-field map and notifies the
// CollectionManager so the backing model's refcount/resources can be released.
void Collection::remove_embedding_field(const std::string& field_name) {
    if(embedding_fields.find(field_name) == embedding_fields.end()) {
        return;
    }

    const auto& del_field = embedding_fields[field_name];
    const auto& model_name = del_field.embed[fields::model_config]["model_name"].get<std::string>();
    embedding_fields.erase(field_name);
    CollectionManager::get_instance().process_embedding_field_delete(model_name);
}

// Returns a copy of the embedding-field map. "Unsafe": no lock is taken here —
// caller is responsible for synchronization.
tsl::htrie_map<char, field> Collection::get_embedding_fields_unsafe() {
    return embedding_fields;
}

// Periodic maintenance hook: repairs the HNSW vector index.
void Collection::do_housekeeping() {
    index->repair_hnsw_index();
}

// Parses `vector_query_str` into `vector_query` and validates it against the
// schema. For auto-embedding fields, embeds any natural-language `queries`
// (averaging the embeddings, or weighting them by `query_weights`) into
// vector_query.values. For wildcard queries with no vector values, degrades
// gracefully to a non-vector query and caps `per_page` by vector_query.k.
// Returns 400 on validation/embedding errors, 500 on embedding timeout.
Option<bool> Collection::parse_and_validate_vector_query(const std::string& vector_query_str,
                                                         vector_query_t& vector_query,
                                                         const bool is_wildcard_query,
                                                         const size_t remote_embedding_timeout_ms,
                                                         const size_t remote_embedding_num_tries,
                                                         size_t& per_page) const {
    auto parse_vector_op = VectorQueryOps::parse_vector_query_str(vector_query_str, vector_query,
                                                                  is_wildcard_query, this, false);
    if(!parse_vector_op.ok()) {
        return Option<bool>(400, parse_vector_op.error());
    }

    // the queried field must exist and actually be a vector field
    auto vector_field_it = search_schema.find(vector_query.field_name);
    if(vector_field_it == search_schema.end() || vector_field_it.value().num_dim == 0) {
        return Option<bool>(400, "Field `" + vector_query.field_name + "` does not have a vector query index.");
    }

    if(!vector_field_it.value().index) {
        return Option<bool>(400, "Field `" + vector_query.field_name + "` is marked as a non-indexed field in the schema.");
    }

    if(!vector_query.queries.empty()) {
        // text `queries` are only meaningful when Typesense itself generates the embeddings
        if(embedding_fields.find(vector_query.field_name) == embedding_fields.end()) {
            return Option<bool>(400, "`queries` parameter is only supported for auto-embedding fields.");
        }

        std::vector<std::vector<float>> embeddings;
        for(const auto& q: vector_query.queries) {
            EmbedderManager& embedder_manager = EmbedderManager::get_instance();
            auto embedder_op = embedder_manager.get_text_embedder(vector_field_it.value().embed[fields::model_config]);
            if(!embedder_op.ok()) {
                return Option<bool>(400, embedder_op.error());
            }

            // bail out if the overall search has already exceeded the embedding timeout
            auto remote_embedding_timeout_us = remote_embedding_timeout_ms * 1000;
            if((std::chrono::duration_cast<std::chrono::microseconds>(
                    std::chrono::system_clock::now().time_since_epoch()).count() - search_begin_us) > remote_embedding_timeout_us) {
                std::string error = "Request timed out.";
                return Option<bool>(500, error);
            }

            auto embedder = embedder_op.get();
            if(embedder->is_remote()) {
                if(remote_embedding_num_tries == 0) {
                    std::string error = "`remote_embedding_num_tries` must be greater than 0.";
                    return Option<bool>(400, error);
                }
            }

            std::string embed_query = embedder_manager.get_query_prefix(vector_field_it.value().embed[fields::model_config]) + q;
            auto embedding_op = embedder->Embed(embed_query, remote_embedding_timeout_ms, remote_embedding_num_tries);

            if(!embedding_op.success) {
                // surface the remote embedder's own error payload when available
                if(embedding_op.error.contains("error")) {
                    return Option<bool>(400, embedding_op.error["error"].get<std::string>());
                } else {
                    return Option<bool>(400, embedding_op.error.dump());
                }
            }
            embeddings.emplace_back(embedding_op.embedding);
        }

        if(vector_query.query_weights.empty()) {
            // get average of all embeddings
            std::vector<float> avg_embedding(vector_field_it.value().num_dim, 0);
            for(const auto& embedding: embeddings) {
                for(size_t i = 0; i < embedding.size(); i++) {
                    avg_embedding[i] += embedding[i];
                }
            }
            for(size_t i = 0; i < avg_embedding.size(); i++) {
                avg_embedding[i] /= embeddings.size();
            }
            vector_query.values = avg_embedding;
        } else {
            // weighted combination: weights are index-aligned with `queries`
            std::vector<float> embeddings_with_weights(vector_field_it.value().num_dim, 0);
            for(size_t i = 0; i < embeddings.size(); i++) {
                for(size_t j = 0; j < embeddings[i].size(); j++) {
                    embeddings_with_weights[j] += embeddings[i][j] * vector_query.query_weights[i];
                }
            }
            vector_query.values = embeddings_with_weights;
        }
    }

    if(is_wildcard_query) {
        if(vector_query.values.empty() && !vector_query.query_doc_given) {
            // for usability we will treat this as non-vector query
            vector_query.field_name.clear();
            if(vector_query.k != 0) {
                per_page = std::min(per_page, vector_query.k);
            }
        } else if(vector_field_it.value().num_dim != vector_query.values.size()) {
            return Option<bool>(400, "Query field `" + vector_query.field_name + "` must have " +
                                      std::to_string(vector_field_it.value().num_dim) + " dimensions.");
        }
    }

    return Option<bool>(true);
}

// Returns the voice-query model attached to this collection (may be null).
std::shared_ptr<VQModel> Collection::get_vq_model() {
    return vq_model;
}
306,470
C++
.cpp
5,706
38.70715
216
0.538291
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,792
text_embedder_tokenizer.cpp
typesense_typesense/src/text_embedder_tokenizer.cpp
#include <fstream> #include <sstream> #include "text_embedder_tokenizer.h" #include "logger.h" #include <unicode/normalizer2.h> BertTokenizerWrapper::BertTokenizerWrapper(const std::string& vocab_path) { std::ifstream stream(vocab_path); std::stringstream ss; ss << stream.rdbuf(); auto vocab_ = ss.str(); bert_tokenizer_ = std::make_unique<BertTokenizer>(vocab_, true, true, ustring("[UNK]"), ustring("[SEP]"), ustring("[PAD]"), ustring("[CLS]"), ustring("[MASK]"), true, true, ustring("##"),512, std::string("longest_first")); } encoded_input_t BertTokenizerWrapper::Encode(const std::string& text) { auto encoded = bert_tokenizer_->Encode(bert_tokenizer_->Tokenize(ustring(text))); auto input_ids = bert_tokenizer_->AddSpecialToken(encoded); auto token_type_ids = bert_tokenizer_->GenerateTypeId(encoded); auto attention_mask = std::vector<int64_t>(input_ids.size(), 1); // BERT supports max sequence length of 512 if (input_ids.size() > 512) { input_ids.resize(512); token_type_ids.resize(512); attention_mask.resize(512); } return {input_ids, token_type_ids, attention_mask}; } DistilbertTokenizer::DistilbertTokenizer(const std::string& vocab_path) : BertTokenizerWrapper(vocab_path) {} encoded_input_t DistilbertTokenizer::Encode(const std::string& text) { auto encoded = bert_tokenizer_->Encode(bert_tokenizer_->Tokenize(ustring(text))); auto input_ids = bert_tokenizer_->AddSpecialToken(encoded); auto attention_mask = std::vector<int64_t>(input_ids.size(), 1); // DistilBERT supports max sequence length of 512 if (input_ids.size() > 512) { input_ids.resize(512); attention_mask.resize(512); } return {input_ids, {}, attention_mask}; } XLMRobertaTokenizer::XLMRobertaTokenizer(const std::string& model_path) { sentencepiece_tokenizer_ = std::make_unique<sentencepiece::SentencePieceProcessor>(); sentencepiece_tokenizer_->Load(model_path); fairseq_tokens_to_ids_["<mask>"] = sentencepiece_tokenizer_->GetPieceSize() + fairseq_offset; 
sentencepiece_tokenizer_->SetEncodeExtraOptions("bos:eos"); } const int XLMRobertaTokenizer::token_to_id(const std::string& token) { auto it = fairseq_tokens_to_ids_.find(token); if (it != fairseq_tokens_to_ids_.end()) { return it->second; } auto spm_id = sentencepiece_tokenizer_->PieceToId(token); if (spm_id == 0) { return fairseq_tokens_to_ids_["<unk>"]; } return spm_id + fairseq_offset; } const std::vector<std::string> XLMRobertaTokenizer::tokenize(const std::string& text) { std::vector<std::string> tokens; sentencepiece_tokenizer_->Encode(text, &tokens); return tokens; } encoded_input_t XLMRobertaTokenizer::Encode(const std::string& text) { auto tokens = tokenize(text); auto input_ids = std::vector<int64_t>(tokens.size()); auto attention_mask = std::vector<int64_t>(tokens.size(), 1); for (size_t i = 0; i < tokens.size(); i++) { input_ids[i] = token_to_id(tokens[i]); } // XLM-RoBERTa supports max sequence length of 128 if (input_ids.size() > 128) { input_ids.resize(128); attention_mask.resize(128); input_ids[input_ids.size() - 1] = fairseq_tokens_to_ids_["<eos>"]; } return {input_ids, {}, attention_mask}; } CLIPTokenizerWrapper::CLIPTokenizerWrapper(const std::string& vocab_path) { try { clip_tokenizer_ = std::make_unique<CLIPTokenizer>(vocab_path); } catch (const std::exception& e) { LOG(INFO) << "Failed to load CLIP tokenizer: " << e.what(); throw; } } encoded_input_t CLIPTokenizerWrapper::Encode(const std::string& text) { auto res = clip_tokenizer_->tokenize({text}); // convert vector int to vector int64_t std::vector<int64_t> input_ids(res.tokens[0].begin(), res.tokens[0].end()); std::vector<int64_t> attention_mask(res.attention_mask[0].begin(), res.attention_mask[0].end()); return {input_ids, {}, attention_mask}; }
4,052
C++
.cpp
90
40.077778
127
0.679432
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,793
adi_tree.cpp
typesense_typesense/src/adi_tree.cpp
#include <cstdint> #include <vector> #include "adi_tree.h" #include "logger.h" struct adi_node_t { uint16_t num_children; uint32_t scions; char* chars; adi_node_t** children; adi_node_t(): scions(0), num_children(0), chars(nullptr), children(nullptr) {} ~adi_node_t() { //LOG(INFO) << "~adi_node: " << this; //nodes.erase(this); delete [] chars; chars = nullptr; for(size_t i = 0; i < num_children; i++) { delete children[i]; } delete [] children; children = nullptr; num_children = 0; } }; adi_tree_t::adi_tree_t() { root = new adi_node_t(); } void adi_tree_t::add_node(adi_node_t* node, const std::string& key, const size_t key_index) { char c = (key_index == key.size()) ? '\0' : key[key_index]; // find the slot for `c` size_t slot = 0; bool found_char = false; while(slot < node->num_children) { if(node->chars[slot] == c) { found_char = true; break; } if(node->chars[slot] > c) { break; } slot++; } if(!found_char) { adi_node_t** new_children = new adi_node_t*[node->num_children+1]; char* new_chars = new char[node->num_children+1]; for(size_t i = 0; i < slot; i++) { new_children[i] = node->children[i]; new_chars[i] = node->chars[i]; } new_children[slot] = new adi_node_t(); /*LOG(INFO) << "new node: " << new_children[slot] << ", slot: " << slot << ", parent node: " << node << ", key: " << key << ", char: " << int(c) << ", node->num_children: " << node->num_children;*/ //nodes.insert(new_children[slot]); new_chars[slot] = c; size_t new_index = slot+1; for(size_t i = slot; i < node->num_children; i++) { new_children[new_index] = node->children[i]; new_chars[new_index] = node->chars[i]; new_index++; } delete [] node->chars; delete [] node->children; node->chars = new_chars; node->children = new_children; node->num_children++; } node->scions++; if(c != '\0') { add_node(node->children[slot], key, key_index+1); } else { node->children[slot]->scions++; } } void adi_tree_t::index(const uint32_t id, const std::string& key) { const auto& id_keys_it = id_keys.find(id); if(id_keys_it 
!= id_keys.end()) { return ; } if(key.empty()) { return; } if(root == nullptr) { root = new adi_node_t(); } add_node(root, key, 0); id_keys.emplace(id, key); } bool adi_tree_t::rank_aggregate(adi_node_t* node, const std::string& key, const size_t key_index, size_t& rank) { char c = (key_index == key.size()) ? '\0' : key[key_index]; for(size_t i = 0; i < node->num_children; i++) { if(node->chars[i] == '\0' && c == '\0') { rank += 1; return true; } if(node->chars[i] == '\0') { rank += 1; } else if(node->chars[i] != c) { rank += node->children[i]->scions; } else { return rank_aggregate(node->children[i], key, key_index+1, rank); } } return false; } size_t adi_tree_t::rank(uint32_t id) { const auto& id_keys_it = id_keys.find(id); if(id_keys_it == id_keys.end()) { return NOT_FOUND; } const std::string& key = id_keys_it->second; size_t rank = 0; bool found = rank_aggregate(root, key, 0, rank); return found ? rank : NOT_FOUND; } adi_node_t* adi_tree_t::get_node(adi_node_t* node, const std::string& key, const size_t key_index, std::vector<adi_node_t*>& path) { if(key_index == key.size()) { // still push the null node if(node->num_children >= 1 && node->chars[0] == '\0') { path.push_back(node); path.push_back(node->children[0]); return node; } return nullptr; } for(size_t i = 0; i < node->num_children; i++) { if(node->chars[i] == key[key_index]) { path.push_back(node); return get_node(node->children[i], key, key_index+1, path); } } return nullptr; } // assumes that node already exists void adi_tree_t::remove_node(adi_node_t* node, const std::string& key, const size_t key_index) { char c = (key_index == key.size()) ? 
'\0' : key[key_index]; for(size_t i = 0; i < node->num_children; i++) { if(node->chars[i] == c) { if(node->children[i]->scions > 1) { // skip to next character in trie as this character is shared by other entries node->scions--; if(c != '\0') { remove_node(node->children[i], key, key_index+1); } else { node->children[i]->scions--; } } else { if(node->num_children == 1) { // solo child, we will have to delete the node itself if(c != '\0') { remove_node(node->children[i], key, key_index+1); node->children[i] = nullptr; } if(root == node) { delete root; root = nullptr; } else { delete node; } } else { // delete the char from node adi_node_t** new_children = new adi_node_t*[node->num_children-1]; char* new_chars = new char[node->num_children-1]; for(size_t j = 0; j < node->num_children; j++) { if(j == i) { continue; } size_t index = (j < i) ? j : j-1; new_children[index] = node->children[j]; new_chars[index] = node->chars[j]; } if(c != '\0') { remove_node(node->children[i], key, key_index+1); } else { delete node->children[i]; } delete [] node->chars; delete [] node->children; node->children = new_children; node->chars = new_chars; node->scions--; node->num_children--; } } break; } } } void adi_tree_t::remove(uint32_t id) { const auto& id_keys_it = id_keys.find(id); if(id_keys_it == id_keys.end()) { return ; } const std::string& key = id_keys_it->second; std::vector<adi_node_t*> path; auto leaf_node = get_node(root, key, 0, path); //LOG(INFO) << "Removing key: " << key << ", seq_id: " << id << ", id_keys.size: " << id_keys.size() // << ", root.num_children: " << root->num_children; if(leaf_node != nullptr) { remove_node(root, key, 0); } id_keys.erase(id); } adi_tree_t::~adi_tree_t() { std::vector<uint32_t> ids; //LOG(INFO) << "ROOT: " << root; for(auto& id_key: id_keys) { ids.push_back(id_key.first); } for(auto id: ids) { remove(id); } //LOG(INFO) << "tree destructor, deleting root: " << root; delete root; //LOG(INFO) << "nodes.size: " << nodes.size(); //auto 
missing_node = *nodes.begin(); //LOG(INFO) << "missing node: " << missing_node; } const adi_node_t* adi_tree_t::get_root() { return root; }
7,780
C++
.cpp
221
25.117647
113
0.479589
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,794
string_utils.cpp
typesense_typesense/src/string_utils.cpp
#include "string_utils.h" #include <iostream> #include <openssl/evp.h> #include <openssl/hmac.h> #include <random> #include <openssl/sha.h> #include <map> #include <netinet/in.h> #include <arpa/inet.h> #include <join.h> #include "logger.h" StringUtils::StringUtils() { UErrorCode errcode = U_ZERO_ERROR; nfkd = icu::Normalizer2::getNFKDInstance(errcode); } StringUtils::~StringUtils() { } std::string lower_and_no_special_chars(const std::string & str) { std::stringstream ss; for(char c : str) { bool is_ascii = ((int)(c) >= 0); bool keep_char = !is_ascii || std::isalnum(c); if(keep_char) { ss << (char) std::tolower(c); } } return ss.str(); } std::string StringUtils::randstring(size_t length) { static auto& chrs = "0123456789" "abcdefghijklmnopqrstuvwxyz" "ABCDEFGHIJKLMNOPQRSTUVWXYZ"; thread_local std::mt19937 rg(std::random_device{}()); thread_local std::uniform_int_distribution<uint32_t> pick(0, sizeof(chrs) - 2); std::string s; s.reserve(length); while(length--) { s += chrs[pick(rg)]; } return s; } std::string StringUtils::hmac(const std::string& key, const std::string& msg) { unsigned int hmac_len; unsigned char hmac[EVP_MAX_MD_SIZE]; HMAC(EVP_sha256(), key.c_str(), key.size(), reinterpret_cast<const unsigned char *>(msg.c_str()), msg.size(), hmac, &hmac_len); std::string digest_raw(reinterpret_cast<char*>(&hmac), hmac_len); return StringUtils::base64_encode(digest_raw); } std::string StringUtils::str2hex(const std::string &str, bool capital) { std::string hexstr; hexstr.resize(str.size() * 2); const size_t a = capital ? 'A' - 1 : 'a' - 1; for (size_t i = 0, c = str[0] & 0xFF; i < hexstr.size(); c = str[i / 2] & 0xFF) { hexstr[i++] = c > 0x9F ? (c / 16 - 9) | a : c / 16 | '0'; hexstr[i++] = (c & 0xF) > 9 ? 
(c % 16 - 9) | a : c % 16 | '0'; } return hexstr; } std::string StringUtils::hash_sha256(const std::string& str) { const size_t SHA256_SIZE = 32; unsigned char hash_buf[SHA256_SIZE]; SHA256(reinterpret_cast<const unsigned char *>(str.c_str()), str.size(), hash_buf); return StringUtils::str2hex(std::string(reinterpret_cast<char*>(hash_buf), SHA256_SIZE)); } void StringUtils::parse_query_string(const std::string& query, std::map<std::string, std::string>& query_map) { std::string key_value; int query_len = int(query.size()); int i = 0; if(query[0] == '?') { i++; } while(i < query_len) { // we have to support un-encoded "&&" in the query string value, which makes things a bit more complex bool start_of_new_param = query[i] == '&' && (i != query_len - 1 && query[i + 1] != '&') && (i != 0 && query[i - 1] != '&'); bool end_of_params = (i == query_len - 1); if(start_of_new_param || end_of_params) { // Save accumulated key_value if(end_of_params && query[i] != '&') { key_value += query[i]; } size_t j = 0; bool iterating_on_key = true; std::string key; std::string value; while(j < key_value.size()) { if(key_value[j] == '=' && iterating_on_key) { iterating_on_key = false; } else if(iterating_on_key) { key += key_value[j]; } else { value += key_value[j]; } j++; } if(!key.empty() && key != "&") { value = StringUtils::url_decode(value); if (query_map.count(key) == 0) { query_map[key] = value; } else if (key == "filter_by") { query_map[key] = query_map[key] + "&&" + value; } else { query_map[key] = value; } } key_value = ""; } else { key_value += query[i]; } i++; } } void StringUtils::split_to_values(const std::string& vals_str, std::vector<std::string>& filter_values) { size_t i = 0; bool inside_tick = false; std::string buffer; buffer.reserve(20); while(i < vals_str.size()) { char c = vals_str[i]; bool escaped_tick = (i != 0) && c == '`' && vals_str[i-1] == '\\'; switch(c) { case '`': if(escaped_tick) { buffer += c; } else if(inside_tick && !buffer.empty()) { inside_tick = 
false; } else { inside_tick = true; } break; case ',': if(!inside_tick) { filter_values.push_back(StringUtils::trim(buffer)); buffer = ""; } else { buffer += c; } break; default: buffer += c; } i++; } if(!buffer.empty()) { filter_values.push_back(StringUtils::trim(buffer)); } } std::string StringUtils::float_to_str(float value) { std::ostringstream os; os << value; return os.str(); } std::string StringUtils::unicode_nfkd(const std::string& text) { UErrorCode errcode = U_ZERO_ERROR; icu::UnicodeString src = icu::UnicodeString::fromUTF8(text); icu::UnicodeString dst; nfkd->normalize(src, dst, errcode); if(!U_FAILURE(errcode)) { std::string output; dst.toUTF8String(output); return output; } else { LOG(ERROR) << "Unicode error during parsing: " << errcode; return text; } } void StringUtils::replace_all(std::string& subject, const std::string& search, const std::string& replace) { if(search.empty()) { return ; } size_t pos = 0; while ((pos = subject.find(search, pos)) != std::string::npos) { subject.replace(pos, search.length(), replace); pos += replace.length(); } } void StringUtils::erase_char(std::string& str, const char c) { str.erase(std::remove(str.begin(), str.end(), c), str.cend()); } std::string StringUtils::trim_curly_spaces(const std::string& str) { std::string left_trimmed; int i = 0; bool inside_curly = false; while(i < str.size()) { switch (str[i]) { case '{': left_trimmed += str[i]; inside_curly = true; break; case '}': left_trimmed += str[i]; inside_curly = false; break; case ' ': if(!inside_curly) { left_trimmed += str[i]; inside_curly = false; } break; default: left_trimmed += str[i]; inside_curly = false; } i++; } std::string right_trimmed; i = left_trimmed.size()-1; inside_curly = false; while(i >= 0) { switch (left_trimmed[i]) { case '}': right_trimmed += left_trimmed[i]; inside_curly = true; break; case '{': right_trimmed += left_trimmed[i]; inside_curly = false; break; case ' ': if(!inside_curly) { right_trimmed += left_trimmed[i]; inside_curly = 
false; } break; default: right_trimmed += left_trimmed[i]; inside_curly = false; } i--; } std::reverse(right_trimmed.begin(), right_trimmed.end()); return right_trimmed; } bool StringUtils::ends_with(const std::string& str, const std::string& ending) { if (str.length() >= ending.length()) { return (0 == str.compare (str.length() - ending.length(), ending.length(), ending)); } else { return false; } } bool StringUtils::contains_word(const std::string& haystack, const std::string& needle) { size_t pos = haystack.find(needle); if(pos == std::string::npos) { return false; } if(pos == 0 && haystack.size() == needle.size()) { return true; } if(pos != 0 && haystack[pos - 1] != ' ') { return false; } size_t end_pos = pos + needle.size(); if(end_pos < haystack.size() and haystack[end_pos] != ' ') { return false; } return true; } char* StringUtils::get_ip_str(const struct sockaddr* sa, char* s, size_t maxlen) { switch (sa->sa_family) { case AF_INET: inet_ntop(AF_INET, &(((struct sockaddr_in*) sa)->sin_addr), s, maxlen); break; case AF_INET6: inet_ntop(AF_INET6, &(((struct sockaddr_in6*) sa)->sin6_addr), s, maxlen); break; default: strncpy(s, "Unknown AF", maxlen); return NULL; } return s; } size_t StringUtils::get_num_chars(const std::string& s) { // finds number of unicode points in given string size_t i = 0, j = 0; while (s[i]) { if ((s[i] & 0xC0) != 0x80) { j++; } i++; } return j; } Option<bool> parse_multi_valued_geopoint_filter(const std::string& filter_query, std::string& tokens, size_t& index) { // Multi-valued geopoint filter. // field_name:[ ([points], options), ([points]) ] auto error = Option<bool>(400, "Could not parse the geopoint filter."); if (filter_query[index] != '[') { return error; } size_t start_index = index; auto size = filter_query.size(); // Individual geopoint filters have square brackets inside them. 
int square_bracket_count = 1; while (++index < size && square_bracket_count > 0) { if (filter_query[index] == '[') { square_bracket_count++; } else if (filter_query[index] == ']') { square_bracket_count--; } } if (square_bracket_count != 0) { return error; } tokens = filter_query.substr(start_index, index - start_index); return Option<bool>(true); } Option<bool> StringUtils::tokenize_filter_query(const std::string& filter_query, std::queue<std::string>& tokens) { std::set<std::string> ref_collection_names; auto size = filter_query.size(); for (size_t i = 0; i < size;) { auto c = filter_query[i]; if (c == ' ') { i++; continue; } if (c == '(') { tokens.push("("); i++; } else if (c == ')') { tokens.push(")"); i++; } else if (c == '&') { if (i + 1 >= size || filter_query[i + 1] != '&') { return Option<bool>(400, "Could not parse the filter filter_query."); } tokens.push("&&"); i += 2; } else if (c == '|') { if (i + 1 >= size || filter_query[i + 1] != '|') { return Option<bool>(400, "Could not parse the filter filter_query."); } tokens.push("||"); i += 2; } else { // Reference filter would start with $ symbol. 
if (c == '$') { auto op = Join::parse_reference_filter(filter_query, tokens, i, ref_collection_names); if (!op.ok()) { return op; } continue; } std::stringstream ss; bool inBacktick = false; bool preceding_colon = false; bool is_geo_value = false; do { if (c == ':') { preceding_colon = true; } if (c == ')' && is_geo_value) { is_geo_value = false; } ss << c; c = filter_query[++i]; if (c == '`') { inBacktick = !inBacktick; } if (preceding_colon && c == '(') { is_geo_value = true; preceding_colon = false; } else if (preceding_colon && c == '[') { std::string value; auto op = parse_multi_valued_geopoint_filter(filter_query, value, i); if (!op.ok()) { return op; } ss << value; break; } else if (preceding_colon && c != ' ') { preceding_colon = false; } } while (i < size && (inBacktick || is_geo_value || (c != '(' && c != ')' && !(c == '&' && filter_query[i + 1] == '&') && !(c == '|' && filter_query[i + 1] == '|')))); auto token = ss.str(); trim(token); tokens.push(token); } } return Option<bool>(true); } Option<bool> StringUtils::split_include_exclude_fields(const std::string& include_exclude_fields, std::vector<std::string>& tokens) { std::string token; auto const& size = include_exclude_fields.size(); for (size_t i = 0; i < size;) { auto c = include_exclude_fields[i]; if (c == ' ') { i++; continue; } else if (c == '$') { // Reference include/exclude std::string ref_include_token; auto split_op = Join::split_reference_include_exclude_fields(include_exclude_fields, i, ref_include_token); if (!split_op.ok()) { return split_op; } tokens.push_back(ref_include_token); continue; } auto comma_pos = include_exclude_fields.find(',', i); token = include_exclude_fields.substr(i, (comma_pos == std::string::npos ? 
size : comma_pos) - i); trim(token); if (!token.empty()) { tokens.push_back(token); } if (comma_pos == std::string::npos) { break; } i = comma_pos + 1; token.clear(); } return Option<bool>(true); } size_t StringUtils::split_facet(const std::string &s, std::vector<std::string> &result, const bool keep_empty, const size_t start_index, const size_t max_values) { std::string::const_iterator substart = s.begin()+start_index, subend; size_t end_index = start_index; std::string delim(""), temp(""); std::string current_str=s; while (true) { auto range_pos = current_str.find("("); auto normal_pos = current_str.find(","); if(range_pos == std::string::npos && normal_pos == std::string::npos){ if(!current_str.empty()){ result.push_back(trim(current_str)); } break; } else if(range_pos < normal_pos){ delim="),"; subend = std::search(substart, s.end(), delim.begin(), delim.end()); temp = std::string(substart, subend) + (*subend == ')' ? ")" : ""); } else{ delim=","; subend = std::search(substart, s.end(), delim.begin(), delim.end()); temp = std::string(substart, subend); } end_index += temp.size() + delim.size(); temp = trim(temp); if (keep_empty || !temp.empty()) { result.push_back(temp); } if(result.size() == max_values) { break; } if (subend == s.end()) { break; } substart = subend + delim.size(); current_str = std::string(substart, s.end()); } return std::min(end_index, s.size()); } /*size_t StringUtils::unicode_length(const std::string& bytes) { std::wstring_convert<std::codecvt_utf8<char32_t>, char32_t> utf8conv; return utf8conv.from_bytes(bytes).size(); }*/ size_t StringUtils::get_occurence_count(const std::string &str, char symbol) { return std::count(str.begin(), str.end(), symbol); }
16,367
C++
.cpp
465
25.098925
119
0.496742
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,795
filter_result_iterator.cpp
typesense_typesense/src/filter_result_iterator.cpp
#include <memory> #include <queue> #include <id_list.h> #include <s2/s2point.h> #include <s2/s2latlng.h> #include <s2/s2region_term_indexer.h> #include <s2/s2cap.h> #include <s2/s2earth.h> #include <s2/s2loop.h> #include <s2/s2builder.h> #include <timsort.hpp> #include "filter_result_iterator.h" #include "index.h" #include "posting.h" #include "collection_manager.h" void copy_references_helper(const std::map<std::string, reference_filter_result_t>* from, std::map<std::string, reference_filter_result_t>*& to, const uint32_t& count) { if (from == nullptr) { return; } to = new std::map<std::string, reference_filter_result_t>[count] {}; for (uint32_t i = 0; i < count; i++) { if (from[i].empty()) { continue; } auto& ref = to[i]; ref.insert(from[i].begin(), from[i].end()); } } void reference_filter_result_t::copy_references(const reference_filter_result_t& from, reference_filter_result_t& to) { return copy_references_helper(from.coll_to_references, to.coll_to_references, from.count); } void filter_result_t::copy_references(const filter_result_t& from, filter_result_t& to) { return copy_references_helper(from.coll_to_references, to.coll_to_references, from.count); } void filter_result_t::and_filter_results(const filter_result_t& a, const filter_result_t& b, filter_result_t& result) { auto lenA = a.count, lenB = b.count; if (lenA == 0 || lenB == 0) { return; } result.docs = new uint32_t[std::min(lenA, lenB)]; auto A = a.docs, B = b.docs, out = result.docs; const uint32_t *endA = A + lenA; const uint32_t *endB = B + lenB; if (a.coll_to_references != nullptr || b.coll_to_references != nullptr) { result.coll_to_references = new std::map<std::string, reference_filter_result_t>[std::min(lenA, lenB)] {}; } while (true) { while (*A < *B) { SKIP_FIRST_COMPARE: if (++A == endA) { result.count = out - result.docs; return; } } while (*A > *B) { if (++B == endB) { result.count = out - result.docs; return; } } if (*A == *B) { *out = *A; if (result.coll_to_references != nullptr) { // 
Copy the references of the document from every collection into result. auto& ref = result.coll_to_references[out - result.docs]; if (a.coll_to_references != nullptr) { ref.insert(a.coll_to_references[A - a.docs].begin(), a.coll_to_references[A - a.docs].end()); } if (b.coll_to_references != nullptr) { ref.insert(b.coll_to_references[B - b.docs].begin(), b.coll_to_references[B - b.docs].end()); } } out++; if (++A == endA || ++B == endB) { result.count = out - result.docs; return; } } else { goto SKIP_FIRST_COMPARE; } } } void filter_result_t::or_filter_results(const filter_result_t& a, const filter_result_t& b, filter_result_t& result) { if (a.count == 0 && b.count == 0) { return; } // If either one of a or b does not have any matches, copy other into result. if (a.count == 0) { result = b; return; } if (b.count == 0) { result = a; return; } size_t indexA = 0, indexB = 0, res_index = 0, lenA = a.count, lenB = b.count; result.docs = new uint32_t[lenA + lenB]; if (a.coll_to_references != nullptr || b.coll_to_references != nullptr) { result.coll_to_references = new std::map<std::string, reference_filter_result_t>[lenA + lenB] {}; } while (indexA < lenA && indexB < lenB) { if (a.docs[indexA] < b.docs[indexB]) { // check for duplicate if (res_index == 0 || result.docs[res_index - 1] != a.docs[indexA]) { result.docs[res_index] = a.docs[indexA]; res_index++; } if (a.coll_to_references != nullptr) { // Copy references of the last result document from every collection in a. 
                auto &ref = result.coll_to_references[res_index - 1];
                ref.insert(a.coll_to_references[indexA].begin(), a.coll_to_references[indexA].end());
            }

            indexA++;
        } else {
            // Append b's id unless it duplicates the previous result entry.
            if (res_index == 0 || result.docs[res_index - 1] != b.docs[indexB]) {
                result.docs[res_index] = b.docs[indexB];
                res_index++;
            }

            if (b.coll_to_references != nullptr) {
                auto &ref = result.coll_to_references[res_index - 1];
                ref.insert(b.coll_to_references[indexB].begin(), b.coll_to_references[indexB].end());
            }

            indexB++;
        }
    }

    // Drain whichever input still has ids remaining.
    while (indexA < lenA) {
        if (res_index == 0 || result.docs[res_index - 1] != a.docs[indexA]) {
            result.docs[res_index] = a.docs[indexA];
            res_index++;
        }

        if (a.coll_to_references != nullptr) {
            auto &ref = result.coll_to_references[res_index - 1];
            ref.insert(a.coll_to_references[indexA].begin(), a.coll_to_references[indexA].end());
        }

        indexA++;
    }

    while (indexB < lenB) {
        if(res_index == 0 || result.docs[res_index - 1] != b.docs[indexB]) {
            result.docs[res_index] = b.docs[indexB];
            res_index++;
        }

        if (b.coll_to_references != nullptr) {
            auto &ref = result.coll_to_references[res_index - 1];
            ref.insert(b.coll_to_references[indexB].begin(), b.coll_to_references[indexB].end());
        }

        indexB++;
    }

    result.count = res_index;

    if (res_index == lenA + lenB) {
        return;
    }

    // shrink fit: the union was over-allocated at lenA + lenB entries,
    // so trim both the id array and the references array to res_index.
    auto out = new uint32_t[res_index];
    memcpy(out, result.docs, res_index * sizeof(uint32_t));
    delete[] result.docs;
    result.docs = out;

    if (result.coll_to_references == nullptr) {
        return;
    }

    auto out_references = new std::map<std::string, reference_filter_result_t>[res_index] {};
    for (uint32_t i = 0; i < res_index; i++) {
        auto& ref = out_references[i];
        ref.insert(result.coll_to_references[i].begin(), result.coll_to_references[i].end());
    }
    delete[] result.coll_to_references;
    result.coll_to_references = out_references;
}

// Positions this AND node on the next doc id matched by both child iterators and
// merges the child references into `reference`. Turns invalid when either side
// is exhausted.
// NOTE(review): is_valid(id) appears to return 1 when the subtree matches `id`
// and -1 when it is exhausted — confirm against filter_result_iterator.h.
void filter_result_iterator_t::and_filter_iterators() {
    while (left_it->validity && right_it->validity) {
        if (left_it->seq_id < right_it->seq_id) {
            auto const& left_validity = left_it->is_valid(right_it->seq_id);

            if (left_validity == 1) {
                seq_id = right_it->seq_id;

                // Matching doc: surface the references gathered by both subtrees.
                reference.clear();
                for (const auto& item: left_it->reference) {
                    reference[item.first] = item.second;
                }
                for (const auto& item: right_it->reference) {
                    reference[item.first] = item.second;
                }
                return;
            }

            if (left_validity == -1) {
                validity = invalid;
                return;
            }
        }

        if (left_it->seq_id > right_it->seq_id) {
            auto const& right_validity = right_it->is_valid(left_it->seq_id);

            if (right_validity == 1) {
                seq_id = left_it->seq_id;

                reference.clear();
                for (const auto& item: left_it->reference) {
                    reference[item.first] = item.second;
                }
                for (const auto& item: right_it->reference) {
                    reference[item.first] = item.second;
                }
                return;
            }

            if (right_validity == -1) {
                validity = invalid;
                return;
            }

            continue;
        }

        if (left_it->seq_id == right_it->seq_id) {
            seq_id = left_it->seq_id;
            reference.clear();

            for (const auto& item: left_it->reference) {
                reference[item.first] = item.second;
            }
            for (const auto& item: right_it->reference) {
                reference[item.first] = item.second;
            }
            return;
        }
    }

    // One side ran out of matches, so the conjunction can never match again.
    validity = invalid;
}

// Positions this OR node on the smaller of the two child ids (merging references
// from both sides when they sit on the same doc). Invalid only when both
// children are invalid.
void filter_result_iterator_t::or_filter_iterators() {
    if (left_it->validity && right_it->validity) {
        if (left_it->seq_id < right_it->seq_id) {
            seq_id = left_it->seq_id;
            reference.clear();

            for (const auto& item: left_it->reference) {
                reference[item.first] = item.second;
            }
            return;
        }

        if (left_it->seq_id > right_it->seq_id) {
            seq_id = right_it->seq_id;
            reference.clear();

            for (const auto& item: right_it->reference) {
                reference[item.first] = item.second;
            }
            return;
        }

        // Both children are on the same doc: merge references from both sides.
        seq_id = left_it->seq_id;
        reference.clear();

        for (const auto& item: left_it->reference) {
            reference[item.first] = item.second;
        }
        for (const auto& item: right_it->reference) {
            reference[item.first] = item.second;
        }
        return;
    }

    if (left_it->validity) {
        seq_id = left_it->seq_id;
        reference.clear();

        for (const auto& item: left_it->reference) {
            reference[item.first] = item.second;
        }
        return;
    }

    if (right_it->validity) {
        seq_id = right_it->seq_id;
        reference.clear();

        for (const auto& item: right_it->reference) {
            reference[item.first] = item.second;
        }
        return;
    }

    validity = invalid;
}

// For each string filter value whose token iterators are parked on the current
// seq_id, advances all of its token iterators so the next call to
// get_string_filter_next_match() looks past the current document.
void filter_result_iterator_t::advance_string_filter_token_iterators() {
    for (uint32_t i = 0; i < posting_list_iterators.size(); i++) {
        auto& filter_value_tokens = posting_list_iterators[i];

        if (!filter_value_tokens[0].valid() || filter_value_tokens[0].id() != seq_id) {
            continue;
        }

        for (auto& iter: filter_value_tokens) {
            if (iter.valid()) {
                iter.next();
            }
        }
    }
}

// Finds the lowest doc id matched by any of the string filter values: tokens of
// a single value are ANDed via posting-list intersection, and the values
// themselves are ORed (lowest candidate id wins).
void filter_result_iterator_t::get_string_filter_next_match(const bool& field_is_array) {
    // If none of the filter value iterators are valid, mark this node as invalid.
    bool one_is_valid = false;

    // Since we do OR between filter values, the lowest seq_id from all is selected.
    uint32_t lowest_id = UINT32_MAX;

    if (filter_node->filter_exp.comparators[0] == EQUALS || filter_node->filter_exp.comparators[0] == NOT_EQUALS) {
        bool match_found = false;
        switch (posting_list_iterators.size()) {
            case 1:
                while(true) {
                    // Perform AND between tokens of a filter value.
                    posting_list_t::intersect(posting_list_iterators[0], one_is_valid);

                    if (!one_is_valid) {
                        break;
                    }

                    // Prefix-marked values use prefix matching; others need an exact match.
                    match_found = string_prefix_filter_index.count(0) == 0 ?
                                  posting_list_t::has_exact_match(posting_list_iterators[0], field_is_array) :
                                  posting_list_t::has_prefix_match(posting_list_iterators[0], field_is_array);
                    if (match_found) {
                        break;
                    }

                    // No exact/prefix match at this doc; advance the token iterators and retry.
                    for (auto& iter: posting_list_iterators[0]) {
                        if (!iter.valid()) {
                            break;
                        }
                        iter.next();
                    }
                }

                if (one_is_valid && match_found) {
                    lowest_id = posting_list_iterators[0][0].id();
                }
                break;

            default :
                for (uint32_t i = 0; i < posting_list_iterators.size(); i++) {
                    auto& filter_value_tokens = posting_list_iterators[i];
                    bool tokens_iter_is_valid;

                    while(true) {
                        // Perform AND between tokens of a filter value.
                        posting_list_t::intersect(filter_value_tokens, tokens_iter_is_valid);

                        if (!tokens_iter_is_valid) {
                            break;
                        }

                        match_found = string_prefix_filter_index.count(i) == 0 ?
                                      posting_list_t::has_exact_match(filter_value_tokens, field_is_array) :
                                      posting_list_t::has_prefix_match(filter_value_tokens, field_is_array);
                        if (match_found) {
                            break;
                        }

                        // No exact/prefix match at this doc; advance the token iterators and retry.
                        for (auto &iter: filter_value_tokens) {
                            if (!iter.valid()) {
                                break;
                            }
                            iter.next();
                        }
                    }

                    one_is_valid = tokens_iter_is_valid || one_is_valid;

                    if (tokens_iter_is_valid && match_found && filter_value_tokens[0].id() < lowest_id) {
                        lowest_id = filter_value_tokens[0].id();
                    }
                }
        }
    } else {
        switch (posting_list_iterators.size()) {
            case 1:
                // Perform AND between tokens of a filter value.
                posting_list_t::intersect(posting_list_iterators[0], one_is_valid);

                if (one_is_valid) {
                    lowest_id = posting_list_iterators[0][0].id();
                }
                break;

            default:
                for (auto& filter_value_tokens : posting_list_iterators) {
                    // Perform AND between tokens of a filter value.
                    bool tokens_iter_is_valid;
                    posting_list_t::intersect(filter_value_tokens, tokens_iter_is_valid);

                    one_is_valid = tokens_iter_is_valid || one_is_valid;

                    if (tokens_iter_is_valid && filter_value_tokens[0].id() < lowest_id) {
                        lowest_id = filter_value_tokens[0].id();
                    }
                }
        }
    }

    if (one_is_valid) {
        equals_iterator_id = seq_id = lowest_id;
    }

    is_equals_iterator_valid = one_is_valid;
    // A NOT_EQUALS node stays valid even when no underlying equals-match remains.
    validity = one_is_valid || is_not_equals_iterator ? valid : invalid;
}

// Advances the numeric id-list iterators currently parked on seq_id and
// refreshes seq_ids[i] (the next candidate id of filter value i). Exhausted
// iterators are removed; the node turns invalid once nothing remains.
void filter_result_iterator_t::advance_numeric_filter_iterators() {
    auto one_is_valid = false;
    for (uint32_t i = 0; i < id_list_iterators.size(); i++) {
        // Either already ahead of seq_id, or a NOT_EQUALS value (no advancing needed here).
        if (seq_ids[i] > seq_id || numerical_not_iterator_index.count(i) > 0) {
            one_is_valid = true;
            continue;
        }

        auto& its = id_list_iterators[i];
        // Iterators get ORed, so we only advance the iterator that is at seq_id.
        // Sizes 1 and 2 are special-cased to avoid the generic erase loop.
        switch (its.size()) {
            case 0:
                continue;
            case 1:
                if (!its[0].valid()) {
                    its.clear();
                    continue;
                }

                if (its[0].id() == seq_id) {
                    its[0].next();
                }
                if (its[0].valid()) {
                    seq_ids[i] = its[0].id();
                    one_is_valid = true;
                }
                continue;
            case 2:
                if (!its[0].valid() && !its[1].valid()) {
                    its.clear();
                    continue;
                }

                if (its[0].valid() && its[0].id() == seq_id) {
                    its[0].next();
                }
                if (its[1].valid() && its[1].id() == seq_id) {
                    its[1].next();
                }

                if (its[0].valid() && its[1].valid()) {
                    seq_ids[i] = its[0].id() < its[1].id() ? its[0].id() : its[1].id();
                    one_is_valid = true;
                } else if (its[0].valid()) {
                    seq_ids[i] = its[0].id();
                    one_is_valid = true;
                } else if (its[1].valid()) {
                    seq_ids[i] = its[1].id();
                    one_is_valid = true;
                }
                continue;
            default:
                auto is_valid = false;
                auto lowest_id = UINT32_MAX;
                for (auto it = its.begin(); it != its.end();) {
                    if (!it->valid()) {
                        it = its.erase(it);
                        continue;
                    }

                    if (it->id() == seq_id) {
                        it->next();
                    }

                    if (it->valid()) {
                        lowest_id = std::min(it->id(), lowest_id);
                        is_valid = true;
                        it++;
                    } else {
                        it = its.erase(it);
                    }
                }

                if (is_valid) {
                    seq_ids[i] = lowest_id;
                    one_is_valid = true;
                }
        }
    }

    is_equals_iterator_valid = one_is_valid;
    validity = one_is_valid ? valid : invalid;
}

// Selects the next matching doc id as the minimum of the per-filter-value
// candidates in seq_ids. When init == true, first seeds seq_ids from the
// freshly created id-list iterators.
void filter_result_iterator_t::get_numeric_filter_match(const bool init) {
    if (init) {
        seq_id = 0;
        // Initialize seq_ids and get the first match.
        auto one_is_valid = false;
        for (uint32_t i = 0; i < id_list_iterators.size(); i++) {
            if (numerical_not_iterator_index.count(i) > 0) {
                seq_ids[i] = 0;
                one_is_valid = true;
                continue;
            }

            // Iterators get ORed, so the lowest id is the match.
            auto& its = id_list_iterators[i];
            switch (its.size()) {
                case 0:
                    continue;
                case 1:
                    if (its[0].valid()) {
                        seq_ids[i] = its[0].id();
                        one_is_valid = true;
                    } else {
                        its.clear();
                    }
                    continue;
                case 2:
                    if (its[0].valid() && its[1].valid()) {
                        seq_ids[i] = its[0].id() < its[1].id() ?
                                     its[0].id() : its[1].id();
                        one_is_valid = true;
                    } else if (its[0].valid()) {
                        seq_ids[i] = its[0].id();
                        one_is_valid = true;
                    } else if (its[1].valid()) {
                        seq_ids[i] = its[1].id();
                        one_is_valid = true;
                    } else {
                        its.clear();
                    }
                    continue;
                default:
                    auto is_valid = false;
                    auto lowest_id = UINT32_MAX;
                    for (auto it = its.begin(); it != its.end();) {
                        if (it->valid()) {
                            lowest_id = std::min(it->id(), lowest_id);
                            is_valid = true;
                            it++;
                        } else {
                            it = its.erase(it);
                        }
                    }

                    if (is_valid) {
                        seq_ids[i] = lowest_id;
                        one_is_valid = true;
                    }
            }
        }

        if (!one_is_valid) {
            validity = invalid;
            return;
        }
    }

    // Multiple filter values get ORed, so the lowest id is the match.
    auto one_is_valid = false;
    auto lowest_id = UINT32_MAX;
    for (const auto& id: seq_ids) {
        // Skip candidates already passed (and the current id itself, unless initializing).
        if (id < seq_id || (!init && id == seq_id)) {
            continue;
        }

        lowest_id = std::min(id, lowest_id);
        one_is_valid = true;
    }

    if (one_is_valid) {
        equals_iterator_id = seq_id = lowest_id;
    }

    validity = one_is_valid ? valid : invalid;
}

// Moves the iterator to the next matching document, dispatching on the node
// kind: a precomputed result array, an operator (AND/OR) node, or a leaf over
// an indexed field.
void filter_result_iterator_t::next() {
    if (validity != valid) {
        return;
    }

    if (timeout_info != nullptr && is_timed_out()) {
        return;
    }

    // No need to traverse iterator tree if there's only one filter or compute_iterators() has been called.
    if (is_filter_result_initialized) {
        if (++result_index >= filter_result.count) {
            validity = invalid;
            return;
        }

        seq_id = filter_result.docs[result_index];
        reference.clear();
        if (filter_result.coll_to_references != nullptr) {
            auto& ref = filter_result.coll_to_references[result_index];
            reference.insert(ref.begin(), ref.end());
        }
        return;
    }

    if (filter_node->isOperator) {
        // Advance the subtrees and then apply operators to arrive at the next valid doc.
        if (filter_node->filter_operator == AND) {
            left_it->next();
            right_it->next();
            and_filter_iterators();
        } else {
            // For OR, only the side(s) currently parked on seq_id need to advance.
            if (left_it->seq_id == seq_id && right_it->seq_id == seq_id) {
                left_it->next();
                right_it->next();
            } else if (left_it->seq_id == seq_id) {
                left_it->next();
            } else if (right_it->seq_id == seq_id) {
                right_it->next();
            }

            or_filter_iterators();
        }

        return;
    }

    const filter a_filter = filter_node->filter_exp;

    if (a_filter.field_name == "id") {
        all_seq_ids_iterator.next();
        if (!all_seq_ids_iterator.valid()) {
            validity = invalid;
            return;
        }
        seq_id = all_seq_ids_iterator.id();
        return;
    }

    if (!index->field_is_indexed(a_filter.field_name)) {
        validity = invalid;
        return;
    }

    field f = index->search_schema.at(a_filter.field_name);

    // NOT_EQUALS leaves are not advanced here — presumably they are evaluated
    // lazily elsewhere via is_valid(); confirm against the header.
    if (is_not_equals_iterator) {
        return;
    }

    if (f.is_integer() || f.is_float()) {
        advance_numeric_filter_iterators();
        get_numeric_filter_match();
        return;
    } else if (f.is_bool()) {
        bool_iterator.next();
        if (!bool_iterator.is_valid) {
            validity = invalid;
            return;
        }
        equals_iterator_id = seq_id = bool_iterator.seq_id;
        return;
    } else if (f.is_string()) {
        advance_string_filter_token_iterators();
        get_string_filter_next_match(f.is_array());
        return;
    }
}

// Computes the ids NOT equal to `value`: all_ids minus the ids matching
// EQUALS value. Takes ownership of `all_ids` (freed here); `result_ids` is a
// newly allocated array owned by the caller.
void numeric_not_equals_filter(num_tree_t* const num_tree, const int64_t value,
                               uint32_t*&& all_ids, uint32_t&& all_ids_length,
                               uint32_t*& result_ids, size_t& result_ids_len) {
    uint32_t* to_exclude_ids = nullptr;
    size_t to_exclude_ids_len = 0;
    num_tree->search(EQUALS, value, &to_exclude_ids, to_exclude_ids_len);

    result_ids_len = ArrayUtils::exclude_scalar(all_ids, all_ids_length, to_exclude_ids, to_exclude_ids_len, &result_ids);

    delete[] all_ids;
    delete[] to_exclude_ids;
}

// Inverts a filter result in place: result_ids becomes all_ids minus result_ids.
// Takes ownership of `all_ids` and of the old `result_ids` (both freed here).
void apply_not_equals(uint32_t*&& all_ids, uint32_t&& all_ids_length,
                      uint32_t*& result_ids, uint32_t& result_ids_len) {

    uint32_t* to_include_ids = nullptr;
    size_t to_include_ids_len = 0;
    to_include_ids_len = ArrayUtils::exclude_scalar(all_ids, all_ids_length, result_ids,
                                                    result_ids_len, &to_include_ids);

    delete[] all_ids;
    delete[] result_ids;
result_ids = to_include_ids; result_ids_len = to_include_ids_len; } void filter_result_iterator_t::init(const bool& enable_lazy_evaluation) { if (filter_node == nullptr) { return; } if (filter_node->isOperator) { if (filter_node->filter_operator == AND) { approx_filter_ids_length = std::min(left_it->approx_filter_ids_length, right_it->approx_filter_ids_length); if (approx_filter_ids_length < COMPUTE_FILTER_ITERATOR_THRESHOLD) { compute_iterators(); } else { and_filter_iterators(); } } else { or_filter_iterators(); approx_filter_ids_length = std::max(left_it->approx_filter_ids_length, right_it->approx_filter_ids_length); } // Rearranging the subtree in hope to reduce computation if/when compute_iterators() is called. if (!is_filter_result_initialized && left_it->approx_filter_ids_length > right_it->approx_filter_ids_length) { std::swap(left_it, right_it); } return; } const filter a_filter = filter_node->filter_exp; bool is_referenced_filter = !a_filter.referenced_collection_name.empty(); if (is_referenced_filter) { // Apply filter on referenced collection and get the sequence ids of current collection from the filtered documents. auto& cm = CollectionManager::get_instance(); auto ref_collection_name = a_filter.referenced_collection_name; auto ref_collection = cm.get_collection(ref_collection_name); if (ref_collection == nullptr) { status = Option<bool>(400, "Referenced collection `" + ref_collection_name + "` not found."); validity = invalid; return; } // `CollectionManager::get_collection` accounts for collection alias being used and provides pointer to the // original collection. 
ref_collection_name = ref_collection->name; auto coll = cm.get_collection(collection_name); if (coll == nullptr) { status = Option<bool>(400, "Collection `" + collection_name + "` not found."); validity = invalid; return; } bool is_referenced = coll->referenced_in.count(ref_collection_name) > 0, has_reference = ref_collection->is_referenced_in(collection_name); if (!is_referenced && !has_reference) { status = Option<bool>(400, "Failed to join on `" + ref_collection_name + "`: No reference field found."); validity = invalid; return; } if (is_referenced) { auto const& field_name = coll->referenced_in.at(ref_collection_name); auto reference_filter_op = ref_collection->get_reference_filter_ids(a_filter.field_name, filter_result, field_name); if (!reference_filter_op.ok()) { status = Option<bool>(400, "Failed to join on `" + a_filter.referenced_collection_name + "` collection: " + reference_filter_op.error()); validity = invalid; return; } } else if (has_reference) { // Get the doc ids of reference collection matching the filter then apply filter on the current collection's // reference helper field. 
filter_result_t result; auto reference_filter_op = ref_collection->get_filter_ids(a_filter.field_name, result); if (!reference_filter_op.ok()) { status = Option<bool>(400, "Failed to join on `" + a_filter.referenced_collection_name + "` collection: " + reference_filter_op.error()); validity = invalid; return; } auto get_reference_field_op = ref_collection->get_referenced_in_field_with_lock(collection_name); if (!get_reference_field_op.ok()) { status = Option<bool>(get_reference_field_op.code(), get_reference_field_op.error()); validity = invalid; return; } auto const& ref_field_name = get_reference_field_op.get(); auto op = index->do_filtering_with_reference_ids(ref_field_name, ref_collection_name, std::move(result)); if (!op.ok()) { status = Option<bool>(op.code(), op.error()); validity = invalid; return; } filter_result = op.get(); } if (filter_result.count == 0) { validity = invalid; return; } seq_id = filter_result.docs[result_index]; if (filter_result.coll_to_references != nullptr) { auto& ref = filter_result.coll_to_references[result_index]; reference.insert(ref.begin(), ref.end()); } is_filter_result_initialized = true; approx_filter_ids_length = filter_result.count; return; } if (a_filter.field_name == "id") { // we handle `ids` separately if (!a_filter.values.empty() && a_filter.values.front() == "*") { if (a_filter.apply_not_equals) { is_filter_result_initialized = true; validity = invalid; return; } if (enable_lazy_evaluation) { all_seq_ids_iterator = index->seq_ids->new_iterator(); if (all_seq_ids_iterator.valid()) { seq_id = all_seq_ids_iterator.id(); approx_filter_ids_length = index->seq_ids->num_ids(); validity = valid; } else { validity = invalid; } return; } else { filter_result.count = index->seq_ids->num_ids(); filter_result.docs = index->seq_ids->uncompress(); } } else { std::vector<uint32_t> result_ids; for (const auto& id_str : a_filter.values) { result_ids.push_back(std::stoul(id_str)); } std::sort(result_ids.begin(), result_ids.end()); 
filter_result.count = result_ids.size(); filter_result.docs = new uint32_t[result_ids.size()]; std::copy(result_ids.begin(), result_ids.end(), filter_result.docs); } if (a_filter.apply_not_equals) { apply_not_equals(index->seq_ids->uncompress(), index->seq_ids->num_ids(), filter_result.docs, filter_result.count); } is_filter_result_initialized = true; if (filter_result.count == 0) { validity = invalid; return; } seq_id = filter_result.docs[result_index]; approx_filter_ids_length = filter_result.count; return; } if (!index->field_is_indexed(a_filter.field_name)) { status = Option<bool>(400, "Cannot filter on non-indexed field `" + a_filter.field_name + "`."); validity = invalid; return; } field f = index->search_schema.at(a_filter.field_name); if (f.is_integer()) { if (f.range_index) { auto const& trie = index->range_index.at(a_filter.field_name); for (size_t fi = 0; fi < a_filter.values.size(); fi++) { const std::string& filter_value = a_filter.values[fi]; auto const& value = (int64_t)std::stol(filter_value); if (a_filter.comparators[fi] == RANGE_INCLUSIVE && fi+1 < a_filter.values.size()) { const std::string& next_filter_value = a_filter.values[fi + 1]; auto const& range_end_value = (int64_t)std::stol(next_filter_value); trie->search_range(value, true, range_end_value, true, filter_result.docs, filter_result.count); fi++; } else if (a_filter.comparators[fi] == EQUALS) { trie->search_equal_to(value, filter_result.docs, filter_result.count); } else if (a_filter.comparators[fi] == NOT_EQUALS) { uint32_t* to_exclude_ids = nullptr; uint32_t to_exclude_ids_len = 0; trie->search_equal_to(value, to_exclude_ids, to_exclude_ids_len); auto all_ids = index->seq_ids->uncompress(); filter_result.count = ArrayUtils::exclude_scalar(all_ids, index->seq_ids->num_ids(), to_exclude_ids, to_exclude_ids_len, &filter_result.docs); delete[] all_ids; delete[] to_exclude_ids; } else if (a_filter.comparators[fi] == GREATER_THAN || a_filter.comparators[fi] == GREATER_THAN_EQUALS) { 
trie->search_greater_than(value, a_filter.comparators[fi] == GREATER_THAN_EQUALS, filter_result.docs, filter_result.count); } else if (a_filter.comparators[fi] == LESS_THAN || a_filter.comparators[fi] == LESS_THAN_EQUALS) { trie->search_less_than(value, a_filter.comparators[fi] == LESS_THAN_EQUALS, filter_result.docs, filter_result.count); } } if (a_filter.apply_not_equals) { apply_not_equals(index->seq_ids->uncompress(), index->seq_ids->num_ids(), filter_result.docs, filter_result.count); } if (filter_result.count == 0) { validity = invalid; return; } seq_id = filter_result.docs[result_index]; is_filter_result_initialized = true; approx_filter_ids_length = filter_result.count; } else { auto const& filter_values_count = a_filter.values.size(); auto const& num_tree = index->numerical_index.at(a_filter.field_name); size_t i = 0; for (size_t fi = 0; fi < filter_values_count; fi++, i++) { const std::string& filter_value = a_filter.values[fi]; auto const value = (int64_t)std::stol(filter_value); auto const& comparator = a_filter.comparators[fi]; if (enable_lazy_evaluation && comparator == NOT_EQUALS) { numerical_not_iterator_index.emplace(i); } std::vector<void*> raw_id_lists; if (comparator == RANGE_INCLUSIVE && fi+1 < filter_values_count) { const std::string& next_filter_value = a_filter.values[fi + 1]; auto const range_end_value = (int64_t)std::stol(next_filter_value); if (enable_lazy_evaluation) { raw_id_lists = num_tree->search(comparator, value, range_end_value); } else { num_tree->range_inclusive_search(value, range_end_value, &filter_result.docs, reinterpret_cast<size_t &>(filter_result.count)); } fi++; } else { if (enable_lazy_evaluation) { raw_id_lists = num_tree->search(comparator, value); } else if (a_filter.comparators[fi] == NOT_EQUALS) { numeric_not_equals_filter(num_tree, value, index->seq_ids->uncompress(), index->seq_ids->num_ids(), filter_result.docs, reinterpret_cast<size_t &>(filter_result.count)); } else { num_tree->search(a_filter.comparators[fi], 
value, &filter_result.docs, reinterpret_cast<size_t &>(filter_result.count)); } } if (enable_lazy_evaluation) { std::vector<id_list_t*> lists; ids_t::to_expanded_id_lists(raw_id_lists, lists, expanded_id_lists); std::vector<id_list_t::iterator_t> iters; for (const auto& id_list: lists) { iters.emplace_back(id_list->new_iterator()); if (comparator == NOT_EQUALS) { auto const& filter_ids_length = id_list->num_ids(); auto const& num_ids = index->seq_ids->num_ids(); approx_filter_ids_length += (num_ids - filter_ids_length); } else { approx_filter_ids_length += id_list->num_ids(); } } id_lists.reserve(id_lists.size() + lists.size()); id_lists.emplace_back(std::move(lists)); id_list_iterators.reserve(id_list_iterators.size() + iters.size()); id_list_iterators.emplace_back(std::move(iters)); } } if (enable_lazy_evaluation) { seq_ids = std::vector<uint32_t>(id_lists.size(), UINT32_MAX); if (a_filter.apply_not_equals) { auto const& num_ids = index->seq_ids->num_ids(); approx_filter_ids_length = approx_filter_ids_length >= num_ids ? num_ids : (num_ids - approx_filter_ids_length); if (approx_filter_ids_length < numeric_filter_ids_threshold) { // Since there are very few matches, and we have to apply not equals, iteration will be inefficient. 
compute_iterators(); return; } else { is_not_equals_iterator = true; } } else if (approx_filter_ids_length < numeric_filter_ids_threshold) { compute_iterators(); return; } get_numeric_filter_match(true); if (is_not_equals_iterator) { seq_id = 0; } last_valid_id = index->seq_ids->last_id(); } else { if (a_filter.apply_not_equals) { apply_not_equals(index->seq_ids->uncompress(), index->seq_ids->num_ids(), filter_result.docs, filter_result.count); } if (filter_result.count == 0) { validity = invalid; return; } seq_id = filter_result.docs[result_index]; is_filter_result_initialized = true; approx_filter_ids_length = filter_result.count; } } return; } else if (f.is_float()) { if (f.range_index) { auto const& trie = index->range_index.at(a_filter.field_name); for (size_t fi = 0; fi < a_filter.values.size(); fi++) { const std::string& filter_value = a_filter.values[fi]; float value = (float)std::atof(filter_value.c_str()); int64_t float_int64 = Index::float_to_int64_t(value); if (a_filter.comparators[fi] == RANGE_INCLUSIVE && fi+1 < a_filter.values.size()) { const std::string& next_filter_value = a_filter.values[fi + 1]; int64_t range_end_value = Index::float_to_int64_t((float) std::atof(next_filter_value.c_str())); trie->search_range(float_int64, true, range_end_value, true, filter_result.docs, filter_result.count); fi++; } else if (a_filter.comparators[fi] == EQUALS) { trie->search_equal_to(float_int64, filter_result.docs, filter_result.count); } else if (a_filter.comparators[fi] == NOT_EQUALS) { uint32_t* to_exclude_ids = nullptr; uint32_t to_exclude_ids_len = 0; trie->search_equal_to(float_int64, to_exclude_ids, to_exclude_ids_len); auto all_ids = index->seq_ids->uncompress(); filter_result.count = ArrayUtils::exclude_scalar(all_ids, index->seq_ids->num_ids(), to_exclude_ids, to_exclude_ids_len, &filter_result.docs); delete[] all_ids; delete[] to_exclude_ids; } else if (a_filter.comparators[fi] == GREATER_THAN || a_filter.comparators[fi] == GREATER_THAN_EQUALS) { 
trie->search_greater_than(float_int64, a_filter.comparators[fi] == GREATER_THAN_EQUALS, filter_result.docs, filter_result.count); } else if (a_filter.comparators[fi] == LESS_THAN || a_filter.comparators[fi] == LESS_THAN_EQUALS) { trie->search_less_than(float_int64, a_filter.comparators[fi] == LESS_THAN_EQUALS, filter_result.docs, filter_result.count); } } if (a_filter.apply_not_equals) { apply_not_equals(index->seq_ids->uncompress(), index->seq_ids->num_ids(), filter_result.docs, filter_result.count); } if (filter_result.count == 0) { validity = invalid; return; } seq_id = filter_result.docs[result_index]; is_filter_result_initialized = true; approx_filter_ids_length = filter_result.count; } else { auto const& filter_values_count = a_filter.values.size(); auto num_tree = index->numerical_index.at(a_filter.field_name); size_t i = 0; for (size_t fi = 0; fi < filter_values_count; fi++, i++) { const std::string& filter_value = a_filter.values[fi]; float value = (float)std::atof(filter_value.c_str()); int64_t float_int64 = Index::float_to_int64_t(value); auto const& comparator = a_filter.comparators[fi]; if (enable_lazy_evaluation && comparator == NOT_EQUALS) { numerical_not_iterator_index.emplace(i); } std::vector<void*> raw_id_lists; if (comparator == RANGE_INCLUSIVE && fi+1 < filter_values_count) { const std::string& next_filter_value = a_filter.values[fi + 1]; int64_t range_end_value = Index::float_to_int64_t((float) std::atof(next_filter_value.c_str())); if (enable_lazy_evaluation) { raw_id_lists = num_tree->search(comparator, float_int64, range_end_value); } else { num_tree->range_inclusive_search(float_int64, range_end_value, &filter_result.docs, reinterpret_cast<size_t &>(filter_result.count)); } fi++; } else { if (enable_lazy_evaluation) { raw_id_lists = num_tree->search(comparator, float_int64); } else if (a_filter.comparators[fi] == NOT_EQUALS) { numeric_not_equals_filter(num_tree, float_int64, index->seq_ids->uncompress(), index->seq_ids->num_ids(), 
filter_result.docs, reinterpret_cast<size_t &>(filter_result.count)); } else { num_tree->search(a_filter.comparators[fi], float_int64, &filter_result.docs, reinterpret_cast<size_t &>(filter_result.count)); } } if (enable_lazy_evaluation) { std::vector<id_list_t*> lists; ids_t::to_expanded_id_lists(raw_id_lists, lists, expanded_id_lists); std::vector<id_list_t::iterator_t> iters; for (const auto& id_list: lists) { iters.emplace_back(id_list->new_iterator()); if (comparator == NOT_EQUALS) { auto const& filter_ids_length = id_list->num_ids(); auto const& num_ids = index->seq_ids->num_ids(); approx_filter_ids_length += (num_ids - filter_ids_length); } else { approx_filter_ids_length += id_list->num_ids(); } } id_lists.reserve(id_lists.size() + lists.size()); id_lists.emplace_back(std::move(lists)); id_list_iterators.reserve(id_list_iterators.size() + iters.size()); id_list_iterators.emplace_back(std::move(iters)); } } if (enable_lazy_evaluation) { seq_ids = std::vector<uint32_t>(id_lists.size(), UINT32_MAX); if (a_filter.apply_not_equals) { auto const& num_ids = index->seq_ids->num_ids(); approx_filter_ids_length = approx_filter_ids_length >= num_ids ? num_ids : (num_ids - approx_filter_ids_length); if (approx_filter_ids_length < numeric_filter_ids_threshold) { // Since there are very few matches, and we have to apply not equals, iteration will be inefficient. 
compute_iterators(); return; } else { is_not_equals_iterator = true; } } else if (approx_filter_ids_length < numeric_filter_ids_threshold) { compute_iterators(); return; } get_numeric_filter_match(true); if (is_not_equals_iterator) { seq_id = 0; } last_valid_id = index->seq_ids->last_id(); } else { if (a_filter.apply_not_equals) { apply_not_equals(index->seq_ids->uncompress(), index->seq_ids->num_ids(), filter_result.docs, filter_result.count); } if (filter_result.count == 0) { validity = invalid; return; } seq_id = filter_result.docs[result_index]; is_filter_result_initialized = true; approx_filter_ids_length = filter_result.count; } } return; } else if (f.is_bool()) { if (f.range_index) { auto const& trie = index->range_index.at(a_filter.field_name); size_t value_index = 0; for (const std::string& filter_value : a_filter.values) { int64_t bool_int64 = (filter_value == "1") ? 1 : 0; if (a_filter.comparators[value_index] == EQUALS) { trie->search_equal_to(bool_int64, filter_result.docs, filter_result.count); } else if (a_filter.comparators[value_index] == NOT_EQUALS) { uint32_t* to_exclude_ids = nullptr; uint32_t to_exclude_ids_len = 0; trie->search_equal_to(bool_int64, to_exclude_ids, to_exclude_ids_len); auto all_ids = index->seq_ids->uncompress(); filter_result.count = ArrayUtils::exclude_scalar(all_ids, index->seq_ids->num_ids(), to_exclude_ids, to_exclude_ids_len, &filter_result.docs); delete[] all_ids; delete[] to_exclude_ids; } value_index++; } } else { auto num_tree = index->numerical_index.at(a_filter.field_name); // For a boolean filter like `in_stock: true` that could match a large number of ids, we use bool_iterator. if (a_filter.values.size() == 1 && a_filter.comparators[0] == EQUALS && !a_filter.apply_not_equals && num_tree->approx_search_count(EQUALS, (a_filter.values[0] == "1" ? 1 : 0)) > bool_filter_ids_threshold) { bool_iterator = num_tree_t::iterator_t(num_tree, EQUALS, (a_filter.values[0] == "1" ? 
1 : 0)); if (!bool_iterator.is_valid) { validity = invalid; return; } seq_id = bool_iterator.seq_id; approx_filter_ids_length = bool_iterator.approx_filter_ids_length; return; } size_t value_index = 0; for (const std::string& filter_value : a_filter.values) { int64_t bool_int64 = (filter_value == "1") ? 1 : 0; size_t result_size = filter_result.count; if (a_filter.comparators[value_index] == NOT_EQUALS) { numeric_not_equals_filter(num_tree, bool_int64, index->seq_ids->uncompress(), index->seq_ids->num_ids(), filter_result.docs, result_size); } else { num_tree->search(a_filter.comparators[value_index], bool_int64, &filter_result.docs, result_size); } filter_result.count = result_size; value_index++; } } if (a_filter.apply_not_equals) { apply_not_equals(index->seq_ids->uncompress(), index->seq_ids->num_ids(), filter_result.docs, filter_result.count); } if (filter_result.count == 0) { validity = invalid; return; } seq_id = filter_result.docs[result_index]; is_filter_result_initialized = true; approx_filter_ids_length = filter_result.count; return; } else if (f.is_geopoint()) { for (uint32_t fi = 0; fi < a_filter.values.size(); fi++) { const std::string& filter_value = a_filter.values[fi]; std::vector<uint32_t> geo_result_ids; std::vector<std::string> filter_value_parts; StringUtils::split(filter_value, filter_value_parts, ","); // x, y, 2, km (or) list of points bool is_polygon = StringUtils::is_float(filter_value_parts.back()); S2Region* query_region; double query_radius_meters; if (is_polygon) { const int num_verts = int(filter_value_parts.size()) / 2; std::vector<S2Point> vertices; double sum = 0.0; for (size_t point_index = 0; point_index < size_t(num_verts); point_index++) { double lat = std::stod(filter_value_parts[point_index * 2]); double lon = std::stod(filter_value_parts[point_index * 2 + 1]); if (point_index + 1 == size_t(num_verts) && lat == std::stod(filter_value_parts[0]) && lon == std::stod(filter_value_parts[1])) { // The last geopoint is same as the 
first one. break; } S2Point vertex = S2LatLng::FromDegrees(lat, lon).ToPoint(); vertices.emplace_back(vertex); } auto loop = new S2Loop(vertices, S2Debug::DISABLE); loop->Normalize(); // if loop is not CCW but CW, change to CCW. S2Error error; if (loop->FindValidationError(&error)) { delete loop; status = Option<bool>(400, "Polygon" + (a_filter.values.size() > 1 ? " at position " + std::to_string(fi + 1) : "") + " is invalid: " + error.text()); validity = invalid; return; } else { query_region = loop; } query_radius_meters = S2Earth::RadiansToMeters(query_region->GetCapBound().GetRadius().radians()); } else { query_radius_meters = std::stof(filter_value_parts[2]); const auto& unit = filter_value_parts[3]; if (unit == "km") { query_radius_meters *= 1000; } else { // assume "mi" (validated upstream) query_radius_meters *= 1609.34; } S1Angle query_radius_radians = S1Angle::Radians(S2Earth::MetersToRadians(query_radius_meters)); double query_lat = std::stod(filter_value_parts[0]); double query_lng = std::stod(filter_value_parts[1]); S2Point center = S2LatLng::FromDegrees(query_lat, query_lng).ToPoint(); query_region = new S2Cap(center, query_radius_radians); } std::unique_ptr<S2Region> query_region_guard(query_region); S2RegionTermIndexer::Options options; options.set_index_contains_points_only(true); S2RegionTermIndexer indexer(options); auto const& geo_range_index = index->geo_range_index.at(a_filter.field_name); std::vector<uint64_t> cell_ids; for (const auto& term : indexer.GetQueryTerms(*query_region, "")) { auto cell = S2CellId::FromToken(term); cell_ids.push_back(cell.id()); } geo_range_index->search_geopoints(cell_ids, geo_result_ids); // Skip exact filtering step if query radius is greater than the threshold. 
if (fi < a_filter.params.size() && query_radius_meters > a_filter.params[fi][filter::EXACT_GEO_FILTER_RADIUS_KEY].get<double>()) { uint32_t* out = nullptr; filter_result.count = ArrayUtils::or_scalar(geo_result_ids.data(), geo_result_ids.size(), filter_result.docs, filter_result.count, &out); delete[] filter_result.docs; filter_result.docs = out; continue; } // `geo_result_ids` will contain all IDs that are within approximately within query radius // we still need to do another round of exact filtering on them std::vector<uint32_t> exact_geo_result_ids; if (f.is_single_geopoint()) { auto sort_field_index = index->sort_index.at(f.name); for (auto result_id : geo_result_ids) { // no need to check for existence of `result_id` because of indexer based pre-filtering above int64_t lat_lng = sort_field_index->at(result_id); S2LatLng s2_lat_lng; GeoPoint::unpack_lat_lng(lat_lng, s2_lat_lng); if (query_region->Contains(s2_lat_lng.ToPoint())) { exact_geo_result_ids.push_back(result_id); } } } else { spp::sparse_hash_map<uint32_t, int64_t*>* geo_field_index = index->geo_array_index.at(f.name); for (auto result_id : geo_result_ids) { int64_t* lat_lngs = geo_field_index->at(result_id); bool point_found = false; // any one point should exist for (size_t li = 0; li < lat_lngs[0]; li++) { int64_t lat_lng = lat_lngs[li + 1]; S2LatLng s2_lat_lng; GeoPoint::unpack_lat_lng(lat_lng, s2_lat_lng); if (query_region->Contains(s2_lat_lng.ToPoint())) { point_found = true; break; } } if (point_found) { exact_geo_result_ids.push_back(result_id); } } } uint32_t* out = nullptr; filter_result.count = ArrayUtils::or_scalar(&exact_geo_result_ids[0], exact_geo_result_ids.size(), filter_result.docs, filter_result.count, &out); delete[] filter_result.docs; filter_result.docs = out; } if (filter_result.count == 0) { validity = invalid; return; } seq_id = filter_result.docs[result_index]; is_filter_result_initialized = true; approx_filter_ids_length = filter_result.count; return; } else if 
(f.is_string()) { art_tree* t = index->search_index.at(a_filter.field_name); for (uint32_t i = 0; i < a_filter.values.size(); i++) { auto filter_value = a_filter.values[i]; auto is_prefix_match = filter_value.size() > 1 && filter_value[filter_value.size() - 1] == '*'; if (is_prefix_match) { filter_value.erase(filter_value.size() - 1); } std::vector<void*> raw_posting_lists; // there could be multiple tokens in a filter value, which we have to treat as ANDs // e.g. country: South Africa Tokenizer tokenizer(filter_value, true, false, f.locale, index->symbols_to_index, index->token_separators); std::string str_token; size_t token_index = 0; std::vector<std::string> str_tokens; auto approx_filter_value_match = UINT32_MAX; while (tokenizer.next(str_token, token_index)) { if (str_token.size() > 100) { str_token.erase(100); } str_tokens.push_back(str_token); if (is_prefix_match) { continue; } art_leaf* leaf = (art_leaf *) art_search(t, (const unsigned char*) str_token.c_str(), str_token.length()+1); if (leaf == nullptr) { continue; } // Tokens of a filter value get AND. 
approx_filter_value_match = std::min(posting_t::num_ids(leaf->values), approx_filter_value_match); raw_posting_lists.push_back(leaf->values); } if (str_tokens.empty()) { status = Option<bool>(400, "Error with filter field `" + f.name + "`: Filter value cannot be empty."); validity = invalid; return; } if (is_prefix_match) { std::vector<search_field_t> fq_fields; fq_fields.emplace_back(f.name, f.name, 1, 0, true, enable_t::off); std::vector<token_t> value_tokens; for (size_t i = 0; i < str_tokens.size(); i++) { value_tokens.emplace_back(i, str_tokens[i], false, str_tokens[i].size(), 0); } value_tokens.back().is_prefix_searched = true; filter_result_iterator_t dummy_it(nullptr, 0); std::vector<sort_by> sort_fields; std::vector<std::vector<art_leaf*>> searched_filters; tsl::htrie_map<char, token_leaf> qtoken_set; Topster* topster = nullptr; spp::sparse_hash_map<uint64_t, uint32_t> groups_processed; uint32_t* all_result_ids = nullptr; size_t all_result_ids_len = 0; std::vector<std::string> group_by_fields; std::set<uint64> query_hashes; size_t typo_tokens_threshold = 0; size_t min_len_1typo = 0; size_t min_len_2typo = 0; std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3> field_values{}; const std::vector<size_t> geopoint_indices; auto fuzzy_search_fields_op = index->fuzzy_search_fields(fq_fields, value_tokens, {}, text_match_type_t::max_score, nullptr, 0, &dummy_it, {}, {}, sort_fields, {0}, searched_filters, qtoken_set, topster, groups_processed, all_result_ids, all_result_ids_len, 0, group_by_fields, false, false, false, false, query_hashes, MAX_SCORE, {true}, typo_tokens_threshold, false, max_filter_by_candidates, min_len_1typo, min_len_2typo, 0, nullptr, field_values, geopoint_indices, "", false); delete[] all_result_ids; if(!fuzzy_search_fields_op.ok()) { continue; } // Searching for `Chris P.*` will return `Chris Parnell` and `Chris Pine`. 
for (const auto& searched_filter_value: searched_filters) { raw_posting_lists.clear(); approx_filter_value_match = UINT32_MAX; for (const auto& leaf: searched_filter_value) { if (leaf == nullptr) { continue; } // Tokens of a filter value get AND. approx_filter_value_match = std::min(posting_t::num_ids(leaf->values), approx_filter_value_match); raw_posting_lists.push_back(leaf->values); } if (raw_posting_lists.size() != str_tokens.size()) { continue; } std::vector<posting_list_t*> plists; posting_t::to_expanded_plists(raw_posting_lists, plists, expanded_plists); if (plists.empty()) { continue; } string_prefix_filter_index.insert(posting_lists.size()); posting_lists.push_back(plists); posting_list_iterators.emplace_back(std::vector<posting_list_t::iterator_t>()); for (auto const& plist: plists) { posting_list_iterators.back().push_back(plist->new_iterator()); } // Multiple filter values get OR. approx_filter_ids_length += approx_filter_value_match; } continue; } if (raw_posting_lists.size() != str_tokens.size()) { continue; } std::vector<posting_list_t*> plists; posting_t::to_expanded_plists(raw_posting_lists, plists, expanded_plists); if (plists.empty()) { continue; } posting_lists.push_back(plists); posting_list_iterators.emplace_back(std::vector<posting_list_t::iterator_t>()); for (auto const& plist: plists) { posting_list_iterators.back().push_back(plist->new_iterator()); } // Multiple filter values get OR. approx_filter_ids_length += approx_filter_value_match; } if (a_filter.apply_not_equals) { auto const& num_ids = index->seq_ids->num_ids(); approx_filter_ids_length = approx_filter_ids_length >= num_ids ? num_ids : (num_ids - approx_filter_ids_length); if (approx_filter_ids_length < string_filter_ids_threshold) { // Since there are very few matches, and we have to apply not equals, iteration will be inefficient. 
// [continuation] Tail of the string-filter branch of iterator initialization
// (function starts on an earlier line): when `apply_not_equals` is set and the
// approximate match count is small, fall back to fully materializing the result.
            compute_iterators();
            return;
        } else {
            // Large not-equals result: iterate lazily, inverting matches on the fly.
            is_not_equals_iterator = true;
        }
    } else if (approx_filter_ids_length < string_filter_ids_threshold) {
        // Few enough matches that materializing the full result set is cheaper.
        compute_iterators();
        return;
    }

    get_string_filter_next_match(f.is_array());

    if (is_not_equals_iterator) {
        // Not-equals iteration starts from doc id 0 and is bounded by the last
        // indexed sequence id.
        seq_id = 0;
        last_valid_id = index->seq_ids->last_id();
    }

    return;
    }
}

/// Advances this iterator to the first match whose sequence id is >= `id`.
/// Sets `validity = invalid` when no such match exists. When the result set has
/// already been materialized (`is_filter_result_initialized`), this is a binary
/// skip inside `filter_result.docs`; otherwise the underlying field-specific
/// iterators (numeric id-lists / bool num_tree / string posting lists) are advanced.
void filter_result_iterator_t::skip_to(uint32_t id) {
    if (is_filter_result_initialized) {
        ArrayUtils::skip_index_to_id(result_index, filter_result.docs, filter_result.count, id);

        if (result_index >= filter_result.count) {
            validity = invalid;
            return;
        }

        seq_id = filter_result.docs[result_index];

        // Refresh reference-collection results for the new position.
        reference.clear();
        if (filter_result.coll_to_references != nullptr) {
            auto& ref = filter_result.coll_to_references[result_index];
            reference.insert(ref.begin(), ref.end());
        }

        return;
    }

    const filter a_filter = filter_node->filter_exp;

    if (a_filter.field_name == "id") {
        // `id` filter iterates over all sequence ids directly.
        all_seq_ids_iterator.skip_to(id);
        if (!all_seq_ids_iterator.valid()) {
            validity = invalid;
            return;
        }
        equals_iterator_id = seq_id = all_seq_ids_iterator.id();
        return;
    }

    if (!index->field_is_indexed(a_filter.field_name)) {
        validity = invalid;
        return;
    }

    field f = index->search_schema.at(a_filter.field_name);

    if (f.is_integer() || f.is_float()) {
        // Skip all the iterators and find a new match.
        auto one_is_valid = false;
        for (uint32_t i = 0; i < id_list_iterators.size(); i++) {
            auto& its = id_list_iterators[i];

            if (numerical_not_iterator_index.count(i) > 0) {
                // NOT_EQUALS value: `its[0]` iterates the ids that DO match, so
                // `id` is a hit only when it is absent from that iterator.
                if (id > last_valid_id) {
                    continue;
                }
                one_is_valid = true;

                if (!its[0].valid() || id < its[0].id()) {
                    seq_ids[i] = id;
                    continue;
                } else if (id == its[0].id()) {
                    // `id` is an equals-match, so the not-equals hit is id + 1.
                    seq_ids[i] = id + 1;
                    continue;
                }

                its[0].skip_to(id);
                if (!its[0].valid() || id < its[0].id()) {
                    seq_ids[i] = id;
                    continue;
                } else if (id == its[0].id()) {
                    seq_ids[i] = id + 1;
                    continue;
                }
                continue;
            }

            // Equals/range value: advance the 1 or 2 (or more) id-list iterators
            // backing this filter value and record the lowest candidate id.
            switch (its.size()) {
                case 0:
                    continue;
                case 1:
                    if (!its[0].valid()) {
                        its.clear();
                        continue;
                    }
                    if (its[0].id() >= id) {
                        one_is_valid = true;
                        continue;
                    }
                    its[0].skip_to(id);
                    if (its[0].valid()) {
                        seq_ids[i] = its[0].id();
                        one_is_valid = true;
                    }
                    continue;
                case 2:
                    if (!its[0].valid() && !its[1].valid()) {
                        its.clear();
                        continue;
                    }
                    if (its[0].valid() && its[0].id() < id) {
                        its[0].skip_to(id);
                    }
                    if (its[1].valid() && its[1].id() < id) {
                        its[1].skip_to(id);
                    }
                    if (its[0].valid() && its[1].valid()) {
                        seq_ids[i] = its[0].id() < its[1].id() ? its[0].id() : its[1].id();
                        one_is_valid = true;
                    } else if (its[0].valid()) {
                        seq_ids[i] = its[0].id();
                        one_is_valid = true;
                    } else if (its[1].valid()) {
                        seq_ids[i] = its[1].id();
                        one_is_valid = true;
                    }
                    continue;
                default:
                    // General case: advance every iterator, dropping exhausted ones.
                    auto is_valid = false;
                    auto lowest_id = UINT32_MAX;
                    for (auto it = its.begin(); it != its.end();) {
                        if (!it->valid()) {
                            it = its.erase(it);
                            continue;
                        }
                        it->skip_to(id);
                        if (it->valid()) {
                            lowest_id = std::min(it->id(), lowest_id);
                            is_valid = true;
                            it++;
                        } else {
                            it = its.erase(it);
                        }
                    }
                    if (is_valid) {
                        seq_ids[i] = lowest_id;
                        one_is_valid = true;
                    }
            }
        }

        if (!one_is_valid) {
            is_equals_iterator_valid = false;
            validity = invalid;
            return;
        }

        // Multiple filter values are OR'ed: the overall match is the lowest
        // candidate across all per-value iterators.
        auto lowest_id = UINT32_MAX;
        for (uint32_t i = 0; i < seq_ids.size(); i++) {
            if (numerical_not_iterator_index.count(i) == 0) {
                if (id > seq_ids[i]) {
                    continue;
                }
                lowest_id = std::min(seq_ids[i], lowest_id);
                one_is_valid = true;
                continue;
            }

            // NOT_EQUALS comparator
            if (id == seq_ids[i]) {
                // In case id is match for not equals iterator, we need to set seq_id to id + 1.
                seq_ids[i]++;
                lowest_id = id;
                one_is_valid = true;
            } else if (id < seq_ids[i]) {
                lowest_id = std::min(seq_ids[i], lowest_id);
                one_is_valid = true;
                continue;
            }
        }

        if (one_is_valid) {
            equals_iterator_id = seq_id = lowest_id;
        }
        is_equals_iterator_valid = one_is_valid;
        validity = one_is_valid || is_not_equals_iterator ? valid : invalid;
        return;
    } else if (f.is_bool()) {
        bool_iterator.skip_to(id);
        if (!bool_iterator.is_valid) {
            validity = invalid;
            return;
        }
        equals_iterator_id = seq_id = bool_iterator.seq_id;
        return;
    } else if (f.is_string()) {
        // Skip all the token iterators and find a new match.
        for (auto& filter_value_tokens : posting_list_iterators) {
            for (auto& token: filter_value_tokens) {
                // We perform AND on tokens. Short-circuiting here.
                if (!token.valid()) {
                    break;
                }
                token.skip_to(id);
            }
        }

        get_string_filter_next_match(f.is_array());
        return;
    }
}

/// Checks whether `id` matches this filter. Returns 1 on a match, 0 when `id`
/// does not match but later ids might (callers can resume from `seq_id`), and
/// -1 when the iterator is exhausted or has timed out.
int filter_result_iterator_t::is_valid(uint32_t id, const bool& override_timeout) {
    if (validity == invalid || (!override_timeout && timeout_info != nullptr && is_timed_out())) {
        return -1;
    }

    // No need to traverse iterator tree if there's only one filter or compute_iterators() has been called.
    if (is_filter_result_initialized) {
        skip_to(id);
        return validity ? (seq_id == id ? 1 : 0) : -1;
    }

    if (filter_node->isOperator) {
        // We only need to consider only valid/invalid state since child nodes can never time out.
        auto left_validity = left_it->is_valid(id), right_validity = right_it->is_valid(id);

        if (filter_node->filter_operator == AND) {
            validity = (left_it->validity == valid && right_it->validity == valid) ? valid : invalid;

            if (left_validity < 1 || right_validity < 1) {
                if (left_validity == -1 || right_validity == -1) {
                    return -1;
                }

                // AND: next possible match is the further of the two children.
                seq_id = std::max(left_it->seq_id, right_it->seq_id);
                return 0;
            }

            seq_id = id;
            // On a match, merge the reference-collection results of both children.
            reference.clear();
            for (const auto& item: left_it->reference) {
                reference[item.first] = item.second;
            }
            for (const auto& item: right_it->reference) {
                reference[item.first] = item.second;
            }

            return 1;
        } else {
            // OR branch; the ternary below continues on the following line.
            validity = (left_it->validity == valid || right_it->validity == valid) ?
// [continuation] completes `validity = (left valid || right valid) ? ...` of the
// OR branch inside is_valid() started on the previous line.
                       valid : invalid;

            if (left_validity < 1 && right_validity < 1) {
                if (left_validity == -1 && right_validity == -1) {
                    return -1;
                } else if (left_validity == -1) {
                    // Left child exhausted: follow the right child only.
                    seq_id = right_it->seq_id;
                    return 0;
                } else if (right_validity == -1) {
                    seq_id = left_it->seq_id;
                    return 0;
                }

                // OR: next possible match is the nearer of the two children.
                seq_id = std::min(left_it->seq_id, right_it->seq_id);
                return 0;
            }

            seq_id = id;
            // Merge reference-collection results from whichever children matched.
            reference.clear();

            if (left_validity == 1) {
                for (const auto& item: left_it->reference) {
                    reference[item.first] = item.second;
                }
            }
            if (right_validity == 1) {
                for (const auto& item: right_it->reference) {
                    reference[item.first] = item.second;
                }
            }

            return 1;
        }
    }

    if (is_not_equals_iterator) {
        // Lazy not-equals: `id` matches unless it equals the underlying
        // equals-iterator's current id.
        if (id > last_valid_id) {
            validity = invalid;
            return -1;
        }

        validity = valid;
        seq_id = id + 1;
        if (!is_equals_iterator_valid || id < equals_iterator_id) {
            return 1;
        } else if (id == equals_iterator_id) {
            return 0;
        }
    }

    skip_to(id);

    if (is_not_equals_iterator) {
        validity = valid;
        seq_id = id + 1;

        if (id == equals_iterator_id) {
            return 0;
        }
        return 1;
    }

    return validity ? (id == equals_iterator_id ? 1 : 0) : -1;
}

/// Returns the first initialization error found in this iterator tree
/// (left subtree checked before the right), or the leaf's own `status`.
Option<bool> filter_result_iterator_t::init_status() {
    if (is_filter_result_initialized) {
        return status;
    } else if (filter_node != nullptr && filter_node->isOperator) {
        auto left_status = left_it->init_status();

        return !left_status.ok() ? left_status : right_it->init_status();
    }

    return status;
}

/// Returns true iff at least one id in the posting list `obj` (compact or
/// expanded form) also matches this filter. Walks both sequences in tandem,
/// advancing whichever lags behind.
bool filter_result_iterator_t::contains_atleast_one(const void *obj) {
    if (validity != valid) {
        return false;
    }

    if(IS_COMPACT_POSTING(obj)) {
        compact_posting_list_t* list = COMPACT_POSTING_PTR(obj);

        if (list->length == 0) {
            return false;
        }

        // Compact layout per entry: [num_offsets, offsets..., id]; step over
        // `num_offsets + 2` slots to reach the next entry.
        size_t i = 0;
        size_t num_existing_offsets = list->id_offsets[i];
        size_t existing_id = list->id_offsets[i + num_existing_offsets + 1];

        while (true) {
            if (existing_id < seq_id) {
                i += num_existing_offsets + 2;
                if (i >= list->length) {
                    return false;
                }
                num_existing_offsets = list->id_offsets[i];
                existing_id = list->id_offsets[i + num_existing_offsets + 1];
            } else if (existing_id > seq_id) {
                // Ask the filter to catch up to `existing_id`.
                auto const& result = is_valid(existing_id);
                if (result == 1) {
                    return true;
                } else if (result == -1) {
                    return false;
                }
            } else {
                return true;
            }
        }
    } else {
        auto list = (posting_list_t*)(obj);
        posting_list_t::iterator_t it = list->new_iterator();

        if (!it.valid()) {
            return false;
        }

        while (true) {
            if (it.id() < seq_id) {
                it.skip_to(seq_id);
                if (!it.valid()) {
                    return false;
                }
            } else if (it.id() > seq_id) {
                auto const& result = is_valid(it.id());
                if (result == 1) {
                    return true;
                } else if (result == -1) {
                    return false;
                }
            } else {
                return true;
            }
        }
    }

    return false;
}

/// Rewinds the iterator to its first match. No-op on a null filter node or
/// (unless `override_timeout`) when the search has already timed out.
void filter_result_iterator_t::reset(const bool& override_timeout) {
    if (filter_node == nullptr) {
        return;
    }

    if (!override_timeout && timeout_info != nullptr && is_timed_out()) {
        return;
    }

    // No need to traverse iterator tree if there's only one filter or compute_iterators() has been called.
    if (is_filter_result_initialized) {
        if (filter_result.count == 0) {
            validity = invalid;
            return;
        }

        result_index = 0;
        seq_id = filter_result.docs[result_index];

        reference.clear();
        if (filter_result.coll_to_references != nullptr) {
            auto& ref = filter_result.coll_to_references[result_index];
            reference.insert(ref.begin(), ref.end());
        }

        validity = valid;
        return;
    }

    if (filter_node->isOperator) {
        // Reset the subtrees then apply operators to arrive at the first valid doc.
        left_it->reset();
        right_it->reset();
        validity = valid;

        if (filter_node->filter_operator == AND) {
            and_filter_iterators();
        } else {
            or_filter_iterators();
        }

        return;
    }

    const filter a_filter = filter_node->filter_exp;

    if (a_filter.field_name == "id") {
        all_seq_ids_iterator = index->seq_ids->new_iterator();
        if (all_seq_ids_iterator.valid()) {
            seq_id = all_seq_ids_iterator.id();
            approx_filter_ids_length = index->seq_ids->num_ids();
            validity = valid;
        } else {
            validity = invalid;
        }
        return;
    }

    if (!index->field_is_indexed(a_filter.field_name)) {
        return;
    }

    field f = index->search_schema.at(a_filter.field_name);

    if (f.is_integer() || f.is_float()) {
        // Re-create fresh iterators over the stored id-lists.
        for (uint32_t i = 0; i < id_lists.size(); i++) {
            auto const& lists = id_lists[i];
            id_list_iterators[i].clear();
            for (auto const& list: lists) {
                id_list_iterators[i].emplace_back(list->new_iterator());
            }
        }
        seq_ids = std::vector<uint32_t>(id_lists.size(), UINT32_MAX);

        get_numeric_filter_match(true);
        if (is_not_equals_iterator) {
            seq_id = 0;
        }
        return;
    } else if (f.is_bool()) {
        bool_iterator.reset();
        seq_id = bool_iterator.seq_id;
        // Ternary continues on the following line.
        validity = bool_iterator.is_valid ?
// [continuation] completes `validity = bool_iterator.is_valid ? ...` from the
// bool branch of reset() on the previous line.
                   valid : invalid;
        return;
    } else if (f.is_string()) {
        // Re-create fresh iterators over the stored posting lists.
        for (uint32_t i = 0; i < posting_lists.size(); i++) {
            auto const& plists = posting_lists[i];
            posting_list_iterators[i].clear();
            for (auto const& plist: plists) {
                posting_list_iterators[i].push_back(plist->new_iterator());
            }
        }

        get_string_filter_next_match(f.is_array());
        if (is_not_equals_iterator) {
            seq_id = 0;
        }
        return;
    }
}

/// Copies the materialized result ids into a freshly allocated array
/// (caller owns `filter_array`). Returns 0 when results are not materialized.
uint32_t filter_result_iterator_t::to_filter_id_array(uint32_t*& filter_array) {
    if (!is_filter_result_initialized) {
        return 0;
    }

    filter_array = new uint32_t[filter_result.count];
    std::copy(filter_result.docs, filter_result.docs + filter_result.count, filter_array);
    return filter_result.count;
}

/// Intersects the sorted id array `A` with this filter's matches; `results`
/// receives a newly allocated array (caller owns it). Returns the match count.
/// Stops early (-1 from is_valid) on exhaustion/timeout.
uint32_t filter_result_iterator_t::and_scalar(const uint32_t* A, const uint32_t& lenA, uint32_t*& results) {
    if (validity != valid) {
        return 0;
    }

    if (is_filter_result_initialized) {
        return ArrayUtils::and_scalar(A, lenA, filter_result.docs, filter_result.count, &results);
    }

    std::vector<uint32_t> filter_ids;
    for (uint32_t i = 0; i < lenA; i++) {
        auto const& id = A[i];
        auto const& result = is_valid(id);

        if (result == 1) {
            filter_ids.push_back(id);
        } else if (result == -1) {
            break;
        }
    }

    if (filter_ids.empty()) {
        return 0;
    }

    results = new uint32_t[filter_ids.size()];
    std::copy(filter_ids.begin(), filter_ids.end(), results);
    return filter_ids.size();
}

/// Overload that also carries reference-collection results: intersects `A`
/// with the filter matches and fills `result.docs` / `result.coll_to_references`.
void filter_result_iterator_t::and_scalar(const uint32_t* A, const uint32_t& lenA, filter_result_t& result) {
    if (validity != valid) {
        return;
    }

    if (filter_result.coll_to_references == nullptr) {
        // No references involved: same logic as the array-returning overload.
        if (is_filter_result_initialized) {
            result.count = ArrayUtils::and_scalar(A, lenA, filter_result.docs, filter_result.count, &result.docs);
            return;
        }

        std::vector<uint32_t> filter_ids;
        for (uint32_t i = 0; i < lenA; i++) {
            auto const& id = A[i];
            auto const& _result = is_valid(id);

            if (_result == 1) {
                filter_ids.push_back(id);
            } else if (_result == -1) {
                break;
            }
        }

        if (filter_ids.empty()) {
            return;
        }

        result.count = filter_ids.size();
        result.docs = new uint32_t[filter_ids.size()];
        std::copy(filter_ids.begin(), filter_ids.end(), result.docs);
        return;
    }

    // References present: need materialized results so matches can be looked
    // up by index into `coll_to_references`.
    if (!is_filter_result_initialized) {
        compute_iterators();
    }

    std::vector<uint32_t> match_indexes;
    for (uint32_t i = 0; i < lenA; i++) {
        auto _result = is_valid(A[i]);

        if (_result == 1) {
            // is_valid advanced `result_index` to the matching position.
            match_indexes.push_back(result_index);
        } else if (_result == -1) {
            break;
        }
    }

    result.count = match_indexes.size();
    result.docs = new uint32_t[match_indexes.size()];
    result.coll_to_references = new std::map<std::string, reference_filter_result_t>[match_indexes.size()] {};

    for (uint32_t i = 0; i < match_indexes.size(); i++) {
        auto const& match_index = match_indexes[i];
        result.docs[i] = filter_result.docs[match_index];

        auto& result_reference = result.coll_to_references[i];
        result_reference.insert(filter_result.coll_to_references[match_index].begin(),
                                filter_result.coll_to_references[match_index].end());
    }
}

/// Root constructor: recursively builds the iterator tree mirroring
/// `filter_node` and initializes each node. `search_begin`/`search_stop`
/// configure a timeout (only on the root; see comment below).
filter_result_iterator_t::filter_result_iterator_t(const std::string& collection_name, const Index *const index,
                                                   const filter_node_t *const filter_node,
                                                   const bool& enable_lazy_evaluation,
                                                   const size_t& max_candidates,
                                                   uint64_t search_begin, uint64_t search_stop) :
        collection_name(collection_name),
        index(index),
        filter_node(filter_node) {
    if (filter_node == nullptr) {
        validity = invalid;
        return;
    }

    // Only initialize timeout_info in the root node. We won't pass search_begin/search_stop parameters to the sub-nodes.
    if (search_stop != UINT64_MAX) {
        timeout_info = std::make_unique<filter_result_iterator_timeout_info>(search_begin, search_stop);
    }

    // Generate the iterator tree and then initialize each node.
    if (filter_node->isOperator) {
        left_it = new filter_result_iterator_t(collection_name, index, filter_node->left, enable_lazy_evaluation,
                                               max_candidates);

        // If left subtree of && operator is invalid, we don't have to evaluate its right subtree.
// [continuation] Inside the root constructor's `filter_node->isOperator` branch:
// short-circuit an AND whose left subtree already matched nothing.
        if (filter_node->filter_operator == AND && left_it->validity == invalid) {
            validity = invalid;
            is_filter_result_initialized = true;
            delete left_it;
            left_it = nullptr;
            return;
        }

        right_it = new filter_result_iterator_t(collection_name, index, filter_node->right, enable_lazy_evaluation,
                                                max_candidates);
    }

    max_filter_by_candidates = max_candidates;
    init(enable_lazy_evaluation);

    if (!validity) {
        this->approx_filter_ids_length = 0;
    }
}

/// Releases owned posting/id lists, the filter node (when owned), and the
/// child iterators.
filter_result_iterator_t::~filter_result_iterator_t() {
    // In case the filter was on string field.
    for(auto expanded_plist: expanded_plists) {
        delete expanded_plist;
    }

    // In case the filter was on int/float field.
    for (auto item: expanded_id_lists) {
        delete item;
    }

    if (delete_filter_node) {
        delete filter_node;
    }

    delete left_it;
    delete right_it;
}

/// Move assignment: frees this node's owned string-filter state and subtrees,
/// then takes over `obj`'s state.
/// NOTE(review): `delete_filter_node` is not transferred here — if `obj` owned
/// its `filter_node`, obj's destructor would delete a node this object now
/// points to. Also `expanded_id_lists` / numeric id-list state is neither freed
/// nor moved, unlike in the destructor. Verify whether moved objects can hold
/// such state in practice — TODO confirm.
filter_result_iterator_t& filter_result_iterator_t::operator=(filter_result_iterator_t&& obj) noexcept {
    if (&obj == this) {
        return *this;
    }

    // In case the filter was on string field.
    for(auto expanded_plist: expanded_plists) {
        delete expanded_plist;
    }

    delete left_it;
    delete right_it;

    collection_name = obj.collection_name;
    index = obj.index;
    filter_node = obj.filter_node;
    left_it = obj.left_it;
    right_it = obj.right_it;

    // Prevent double-free of the subtrees by obj's destructor.
    obj.left_it = nullptr;
    obj.right_it = nullptr;

    result_index = obj.result_index;

    filter_result = std::move(obj.filter_result);

    posting_list_iterators = std::move(obj.posting_list_iterators);
    expanded_plists = std::move(obj.expanded_plists);

    validity = obj.validity;

    seq_id = obj.seq_id;
    reference = std::move(obj.reference);
    status = std::move(obj.status);
    is_filter_result_initialized = obj.is_filter_result_initialized;
    approx_filter_ids_length = obj.approx_filter_ids_length;

    return *this;
}

/// Copies (and, for references, moves) the next `n` matching ids out of the
/// materialized result set into `result`, advancing `result_index`.
/// Requires compute_iterators() to have run; no-op otherwise.
void filter_result_iterator_t::get_n_ids(const uint32_t& n, filter_result_t*& result, const bool& override_timeout) {
    if (!is_filter_result_initialized) {
        return;
    }

    if (!override_timeout && timeout_info != nullptr) {
        // In Index::search_wildcard number of calls to get_n_ids will be min(number of threads, filter match ids).
        // Therefore, `timeout_info->function_call_counter` won't reach `function_call_modulo` if only incremented on
        // function call.
        if (n > function_call_modulo && is_timed_out(true)) {
            return;
        }
    }

    auto result_length = result->count = std::min(n, filter_result.count - result_index);
    result->docs = new uint32_t[result_length];
    if (filter_result.coll_to_references != nullptr) {
        result->coll_to_references = new std::map<std::string, reference_filter_result_t>[result_length] {};
    }

    for (uint32_t i = 0; i < result_length; i++, result_index++) {
        result->docs[i] = filter_result.docs[result_index];

        if (filter_result.coll_to_references == nullptr) {
            continue;
        }

        auto& result_reference = result->coll_to_references[i];
        // Moving references since get_n_ids is only called in wildcard search flow and filter_result_iterator is
        // not used afterwards.
        result_reference = std::move(filter_result.coll_to_references[result_index]);
    }

    validity = result_index < filter_result.count ? valid : invalid;
}

/// Variant of get_n_ids that skips ids present in the sorted
/// `excluded_result_ids` array, tracking progress via `excluded_result_index`.
void filter_result_iterator_t::get_n_ids(const uint32_t& n,
                                         uint32_t& excluded_result_index,
                                         uint32_t const* const excluded_result_ids,
                                         const size_t& excluded_result_ids_size,
                                         filter_result_t*& result, const bool& override_timeout) {
    if (excluded_result_ids == nullptr || excluded_result_ids_size == 0 ||
            excluded_result_index >= excluded_result_ids_size) {
        // Nothing left to exclude: fall back to the simple overload.
        return get_n_ids(n, result, override_timeout);
    }

    // This method is only called in Index::search_wildcard after filter_result_iterator_t::compute_iterators.
    if (!is_filter_result_initialized) {
        return;
    }

    if (!override_timeout && timeout_info != nullptr) {
        // In Index::search_wildcard number of calls to get_n_ids will be min(number of threads, filter match ids).
        // Therefore, `timeout_info->function_call_counter` won't reach `function_call_modulo` if only incremented on
        // function call.
// [continuation] Inside the excluding get_n_ids() overload: timeout check,
// then collect up to `n` matches that are not in the excluded id array.
        if (n > function_call_modulo && is_timed_out(true)) {
            return;
        }
    }

    std::vector<uint32_t> match_indexes;
    for (uint32_t count = 0; count < n && result_index < filter_result.count; result_index++) {
        auto id = filter_result.docs[result_index];
        // skip_index_to_id returns whether `id` is present in the excluded set.
        if (!ArrayUtils::skip_index_to_id(excluded_result_index, excluded_result_ids, excluded_result_ids_size, id)) {
            match_indexes.push_back(result_index);
            count++;
        }
    }

    result->count = match_indexes.size();
    result->docs = new uint32_t[match_indexes.size()];
    if (filter_result.coll_to_references != nullptr) {
        result->coll_to_references = new std::map<std::string, reference_filter_result_t>[match_indexes.size()] {};
    }

    for (uint32_t i = 0; i < match_indexes.size(); i++) {
        auto const& match_index = match_indexes[i];
        result->docs[i] = filter_result.docs[match_index];

        if (filter_result.coll_to_references == nullptr) {
            continue;
        }

        auto& result_reference = result->coll_to_references[i];
        // Moving references since get_n_ids is only called in wildcard search flow and filter_result_iterator is
        // not used afterwards.
        result_reference = std::move(filter_result.coll_to_references[match_index]);
    }

    validity = result_index < filter_result.count ? valid : invalid;
}

/// Builds a bare AND node (no children yet) carrying only an approximate match
/// count; used as the synthetic root in add_phrase_ids below.
filter_result_iterator_t::filter_result_iterator_t(uint32_t approx_filter_ids_length) :
        approx_filter_ids_length(approx_filter_ids_length) {
    filter_node = new filter_node_t(AND, nullptr, nullptr);
    delete_filter_node = true;
}

/// Wraps a pre-computed sorted id array (takes ownership of `ids`) as a fully
/// materialized iterator backed by a dummy filter node.
filter_result_iterator_t::filter_result_iterator_t(uint32_t* ids, const uint32_t& ids_count,
                                                   const size_t& max_candidates,
                                                   uint64_t search_begin, uint64_t search_stop) {
    filter_result.count = approx_filter_ids_length = ids_count;
    filter_result.docs = ids;
    validity = ids_count > 0 ? valid : invalid;

    if (validity) {
        seq_id = filter_result.docs[result_index];
        is_filter_result_initialized = true;
        filter_node = new filter_node_t(filter{"dummy", {}, {}});
        delete_filter_node = true;

        if (search_stop != UINT64_MAX) {
            timeout_info = std::make_unique<filter_result_iterator_timeout_info>(search_begin, search_stop);
        }
    }

    max_filter_by_candidates = max_candidates;
}

/// ANDs `phrase_result_ids` with the existing iterator `fit` by building a new
/// synthetic root (phrase-ids leaf AND fit) and replacing `fit` with it.
/// `fit` becomes owned by the new root; timeout info moves to the root.
void filter_result_iterator_t::add_phrase_ids(filter_result_iterator_t*& fit,
                                              uint32_t* phrase_result_ids, const uint32_t& phrase_result_count) {
    fit->reset();
    auto root_iterator = new filter_result_iterator_t(std::min(phrase_result_count, fit->approx_filter_ids_length));
    root_iterator->left_it = new filter_result_iterator_t(phrase_result_ids, phrase_result_count,
                                                          fit->max_filter_by_candidates);
    root_iterator->right_it = fit;
    root_iterator->timeout_info = std::move(fit->timeout_info);

    root_iterator->and_filter_iterators();

    fit = root_iterator;
}

/// Fully materializes this iterator's matches into `filter_result`
/// (recursively for operator nodes), after which iteration is a simple array
/// walk. (Function body continues on subsequent lines.)
void filter_result_iterator_t::compute_iterators() {
    if (filter_node == nullptr) {
        validity = invalid;
        is_filter_result_initialized = false;
        return;
    }

    if (timeout_info != nullptr && is_timed_out()) {
        return;
    }

    if (is_filter_result_initialized) {
        return;
    }

    if (filter_node->isOperator) {
        if (timeout_info != nullptr) {
            // Passing timeout_info into subtree so individual nodes can check for timeout.
left_it->timeout_info = std::make_unique<filter_result_iterator_timeout_info>(*timeout_info); right_it->timeout_info = std::make_unique<filter_result_iterator_timeout_info>(*timeout_info); } left_it->compute_iterators(); right_it->compute_iterators(); if (filter_node->filter_operator == AND) { filter_result_t::and_filter_results(left_it->filter_result, right_it->filter_result, filter_result); } else { filter_result_t::or_filter_results(left_it->filter_result, right_it->filter_result, filter_result); } if (left_it->validity == timed_out || right_it->validity == timed_out || (timeout_info != nullptr && is_timed_out(true))) { validity = timed_out; } // In a complex filter query a sub-expression might not match any document while the full expression does match // at least one document. If the full expression doesn't match any document, we return early in the search. if (filter_result.count == 0 && validity != timed_out) { validity = invalid; } else if (filter_result.count > 0) { result_index = 0; seq_id = filter_result.docs[result_index]; approx_filter_ids_length = filter_result.count; if (filter_result.coll_to_references != nullptr) { for (const auto& ref_result: filter_result.coll_to_references[result_index]) { reference.insert(ref_result); } } } is_filter_result_initialized = true; // Deleting subtree since we've already computed the result. 
delete left_it; left_it = nullptr; delete right_it; right_it = nullptr; return; } const filter a_filter = filter_node->filter_exp; if (a_filter.field_name == "id") { if (index->seq_ids->num_ids() == 0) { validity = invalid; return; } filter_result.docs = index->seq_ids->uncompress(); filter_result.count = index->seq_ids->num_ids(); if (timeout_info != nullptr) { is_timed_out(true); } if (validity != timed_out && filter_result.count == 0) { validity = invalid; return; } result_index = 0; seq_id = filter_result.docs[result_index]; is_filter_result_initialized = true; approx_filter_ids_length = filter_result.count; return; } if (index->search_schema.count(a_filter.field_name) == 0) { return; } field f = index->search_schema.at(a_filter.field_name); if (f.is_integer() || f.is_float()) { uint32_t* filter_ids = nullptr; size_t filter_ids_len = 0; // aggregates IDs to reduce excessive ORing std::vector<uint32_t> f_id_buff; for (uint32_t i = 0; i < id_lists.size(); i++) { auto const& lists = id_lists[i]; auto const& is_not_equals_comparator = numerical_not_iterator_index.count(i) != 0; if (lists.empty() && is_not_equals_comparator) { auto all_ids = index->seq_ids->uncompress(); std::copy(all_ids, all_ids + index->seq_ids->num_ids(), std::back_inserter(f_id_buff)); delete[] all_ids; continue; } for (const auto& list: lists) { if (is_not_equals_comparator) { std::vector<uint32_t> equals_ids; list->uncompress(equals_ids); uint32_t* not_equals_ids = nullptr; auto const not_equals_ids_len = ArrayUtils::exclude_scalar(index->seq_ids->uncompress(), index->seq_ids->num_ids(), &equals_ids[0], equals_ids.size(), &not_equals_ids); std::copy(not_equals_ids, not_equals_ids + not_equals_ids_len, std::back_inserter(f_id_buff)); } else { list->uncompress(f_id_buff); } if (f_id_buff.size() >= 100'000) { gfx::timsort(f_id_buff.begin(), f_id_buff.end()); f_id_buff.erase(std::unique( f_id_buff.begin(), f_id_buff.end() ), f_id_buff.end()); uint32_t* out = nullptr; filter_ids_len = 
ArrayUtils::or_scalar(filter_ids, filter_ids_len, f_id_buff.data(), f_id_buff.size(), &out); delete[] filter_ids; filter_ids = out; std::vector<uint32_t>().swap(f_id_buff); // clears out memory if (timeout_info != nullptr && is_timed_out(true)) { goto compute_done; } } } } compute_done: if (!f_id_buff.empty()) { gfx::timsort(f_id_buff.begin(), f_id_buff.end()); f_id_buff.erase(std::unique( f_id_buff.begin(), f_id_buff.end() ), f_id_buff.end()); uint32_t* out = nullptr; filter_ids_len = ArrayUtils::or_scalar(filter_ids, filter_ids_len, f_id_buff.data(), f_id_buff.size(), &out); delete[] filter_ids; filter_ids = out; std::vector<uint32_t>().swap(f_id_buff); // clears out memory } filter_result.docs = filter_ids; filter_result.count = filter_ids_len; if (a_filter.apply_not_equals) { apply_not_equals(index->seq_ids->uncompress(), index->seq_ids->num_ids(), filter_result.docs, filter_result.count); } } else if (f.is_bool()) { auto num_tree = index->numerical_index.at(a_filter.field_name); int64_t bool_int64 = (a_filter.values[0] == "1") ? 1 : 0; size_t result_size = 0; num_tree->search(a_filter.comparators[0], bool_int64, &filter_result.docs, result_size); filter_result.count = result_size; if (timeout_info != nullptr) { is_timed_out(true); } } else if (f.is_string()) { // Resetting posting_list_iterators. 
for (uint32_t i = 0; i < posting_lists.size(); i++) { auto const& plists = posting_lists[i]; posting_list_iterators[i].clear(); for (auto const& plist: plists) { posting_list_iterators[i].push_back(plist->new_iterator()); } } uint32_t* or_ids = nullptr; size_t or_ids_size = 0; // aggregates IDs across array of filter values and reduces excessive ORing std::vector<uint32_t> f_id_buff; for (uint32_t i = 0; i < posting_lists.size(); i++) { auto& p_list = posting_lists[i]; if (string_prefix_filter_index.count(i) != 0 && (a_filter.comparators[0] == EQUALS || a_filter.comparators[0] == NOT_EQUALS)) { // Exact prefix match, needs intersection + prefix matching std::vector<uint32_t> result_id_vec; posting_list_t::intersect(p_list, result_id_vec); if (result_id_vec.empty()) { continue; } // need to do prefix match uint32_t* prefix_str_ids = new uint32_t[result_id_vec.size()]; size_t prefix_str_ids_size = 0; std::unique_ptr<uint32_t[]> prefix_str_ids_guard(prefix_str_ids); posting_list_t::get_prefix_matches(posting_list_iterators[i], f.is_array(), result_id_vec.data(), result_id_vec.size(), prefix_str_ids, prefix_str_ids_size); if (prefix_str_ids_size == 0) { continue; } for (size_t pi = 0; pi < prefix_str_ids_size; pi++) { f_id_buff.push_back(prefix_str_ids[pi]); } } else if (a_filter.comparators[0] == EQUALS || a_filter.comparators[0] == NOT_EQUALS) { // needs intersection + exact matching (unlike CONTAINS) std::vector<uint32_t> result_id_vec; posting_list_t::intersect(p_list, result_id_vec); if (result_id_vec.empty()) { continue; } // need to do exact match uint32_t* exact_str_ids = new uint32_t[result_id_vec.size()]; size_t exact_str_ids_size = 0; std::unique_ptr<uint32_t[]> exact_str_ids_guard(exact_str_ids); posting_list_t::get_exact_matches(posting_list_iterators[i], f.is_array(), result_id_vec.data(), result_id_vec.size(), exact_str_ids, exact_str_ids_size); if (exact_str_ids_size == 0) { continue; } for (size_t ei = 0; ei < exact_str_ids_size; ei++) { 
// NOTE(review): this chunk begins inside the string-field branch of the
// enclosing filter-computation member function (its head lies before this
// chunk), so the leading lines below are its tail.
                    f_id_buff.push_back(exact_str_ids[ei]);
                }
            } else {
                // CONTAINS
                size_t before_size = f_id_buff.size();
                posting_list_t::intersect(p_list, f_id_buff);
                if (f_id_buff.size() == before_size) {
                    // this filter value matched nothing; move on to the next one
                    continue;
                }
            }

            // Flush the per-value ID buffer into the aggregate OR-set once it
            // grows large, or immediately when there is only one filter value.
            if (f_id_buff.size() > 100000 || a_filter.values.size() == 1) {
                gfx::timsort(f_id_buff.begin(), f_id_buff.end());
                f_id_buff.erase(std::unique( f_id_buff.begin(), f_id_buff.end() ), f_id_buff.end());

                uint32_t* out = nullptr;
                or_ids_size = ArrayUtils::or_scalar(or_ids, or_ids_size, f_id_buff.data(), f_id_buff.size(), &out);
                delete[] or_ids;
                or_ids = out;
                std::vector<uint32_t>().swap(f_id_buff); // clears out memory

                if (timeout_info != nullptr && is_timed_out(true)) {
                    break;
                }
            }
        }

        // Merge whatever is still buffered into the final OR-set.
        if (!f_id_buff.empty()) {
            gfx::timsort(f_id_buff.begin(), f_id_buff.end());
            f_id_buff.erase(std::unique( f_id_buff.begin(), f_id_buff.end() ), f_id_buff.end());

            uint32_t* out = nullptr;
            or_ids_size = ArrayUtils::or_scalar(or_ids, or_ids_size, f_id_buff.data(), f_id_buff.size(), &out);
            delete[] or_ids;
            or_ids = out;
            std::vector<uint32_t>().swap(f_id_buff); // clears out memory
        }

        filter_result.docs = or_ids;
        filter_result.count = or_ids_size;

        if (a_filter.apply_not_equals) {
            // `!=` semantics: complement the matched set against all live doc ids.
            apply_not_equals(index->seq_ids->uncompress(), index->seq_ids->num_ids(),
                             filter_result.docs, filter_result.count);
        }
    }

    // An empty (but not timed-out) result means this sub-expression can never
    // match, so the iterator is marked invalid.
    if (validity != timed_out && filter_result.count == 0) {
        validity = invalid;
        return;
    }

    result_index = 0;
    seq_id = filter_result.docs[result_index];

    is_filter_result_initialized = true;
    approx_filter_ids_length = filter_result.count;
}

/// Returns true once the search deadline has passed; after that, the iterator
/// stays timed out (first check short-circuits on `validity`). To keep clock
/// reads cheap, the time is only sampled once every `function_call_modulo`
/// invocations unless `override_function_call_counter` forces an immediate check.
/// NOTE(review): despite its name, `search_stop_us` is compared against the
/// elapsed duration since `search_begin_us`, i.e. it acts as a timeout span.
bool filter_result_iterator_t::is_timed_out(const bool& override_function_call_counter) {
    if (validity == timed_out) {
        return true;
    }

    if (override_function_call_counter || ++(timeout_info->function_call_counter) % function_call_modulo == 0) {
        if ((std::chrono::duration_cast<std::chrono::microseconds>(
                std::chrono::system_clock::now().time_since_epoch()).count() - timeout_info->search_begin_us) > timeout_info->search_stop_us) {
            validity = timed_out;
            return true;
        }
    }

    return false;
}

// Simple value carrier for the search deadline (see note on is_timed_out()
// regarding the semantics of the two values).
filter_result_iterator_timeout_info::filter_result_iterator_timeout_info(uint64_t search_begin, uint64_t search_stop) :
        search_begin_us(search_begin), search_stop_us(search_stop) {}
105,134
C++
.cpp
2,305
31.043384
143
0.50041
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,796
archive_utils.cpp
typesense_typesense/src/archive_utils.cpp
#include "archive_utils.h"
#include "tsconfig.h"
#include <fstream>
#include <cstdio>
#include <cstring>
#include <cerrno>
#include <stdexcept>

/// Extracts a gzipped tarball at `archive_path` into `destination_path`.
/// Returns true only if every entry was written successfully; a partial
/// extraction may be left behind on failure.
///
/// NOTE(review): entry pathnames are appended to `destination_path` verbatim,
/// so a hostile archive containing ".." components or symlink tricks could
/// write outside the destination. If archives can come from untrusted sources,
/// add ARCHIVE_EXTRACT_SECURE_NODOTDOT | ARCHIVE_EXTRACT_SECURE_SYMLINKS to
/// `flags`.
bool ArchiveUtils::extract_tar_gz_from_file(const std::string& archive_path, const std::string& destination_path) {
    struct archive* a = nullptr;
    struct archive* ext = nullptr;
    struct archive_entry* entry;
    int flags = ARCHIVE_EXTRACT_TIME | ARCHIVE_EXTRACT_PERM | ARCHIVE_EXTRACT_ACL | ARCHIVE_EXTRACT_FFLAGS;
    bool extraction_successful = true;

    a = archive_read_new();
    if (!a) {
        return false;
    }

    archive_read_support_format_all(a);
    archive_read_support_filter_all(a);

    ext = archive_write_disk_new();
    if (!ext) {
        archive_read_free(a);
        return false;
    }

    archive_write_disk_set_options(ext, flags);
    archive_write_disk_set_standard_lookup(ext);

    if (archive_read_open_filename(a, archive_path.c_str(), BUFFER_SIZE) != ARCHIVE_OK) {
        extraction_successful = false;
    }

    while (extraction_successful) {
        int r = archive_read_next_header(a, &entry);
        if (r == ARCHIVE_EOF) {
            break;
        }
        // ARCHIVE_WARN-level issues are tolerated; only hard errors abort.
        if (r < ARCHIVE_WARN) {
            extraction_successful = false;
            break;
        }

        // Re-root the entry under the destination directory.
        const char* current_file = archive_entry_pathname(entry);
        std::string full_path = destination_path + "/" + current_file;
        archive_entry_set_pathname(entry, full_path.c_str());

        r = archive_write_header(ext, entry);
        if (r < ARCHIVE_OK) {
            extraction_successful = false;
            break;
        }

        if (archive_entry_size(entry) > 0) {
            r = copy_data(a, ext);
            if (r < ARCHIVE_WARN) {
                extraction_successful = false;
                break;
            }
        }

        r = archive_write_finish_entry(ext);
        if (r < ARCHIVE_WARN) {
            extraction_successful = false;
            break;
        }
    }

    archive_read_close(a);
    archive_read_free(a);
    archive_write_close(ext);
    archive_write_free(ext);

    return extraction_successful;
}

/// Writes `archive_content` (a .tar.gz byte blob) to a temporary file and
/// extracts it into `destination_path`. The temp file and its directory are
/// removed afterwards via cleanup().
/// Returns false for empty input or on extraction failure.
/// @throws std::runtime_error if the temp file cannot be created or removed.
bool ArchiveUtils::extract_tar_gz_from_memory(const std::string& archive_content, const std::string& destination_path) {
    if (archive_content.empty()) {
        return false;
    }

    std::string temp_file_path = create_temp_tar_gz(archive_content);
    bool result = extract_tar_gz_from_file(temp_file_path, destination_path);
    cleanup(temp_file_path);
    return result;
}

/// Streams all data blocks of the current entry from the reader `ar` to the
/// disk writer `aw`. Returns ARCHIVE_OK on success, else the libarchive code.
int ArchiveUtils::copy_data(struct archive* ar, struct archive* aw) {
    int r;
    const void* buff;
    size_t size;
    la_int64_t offset;

    for (;;) {
        r = archive_read_data_block(ar, &buff, &size, &offset);
        if (r == ARCHIVE_EOF) return ARCHIVE_OK;
        if (r < ARCHIVE_OK) return r;
        r = archive_write_data_block(aw, buff, size, offset);
        if (r < ARCHIVE_OK) {
            return r;
        }
    }
}

/// Ensures `path` exists as a directory (mode 0775).
/// Returns true if it was created OR already existed.
bool ArchiveUtils::create_directory(const std::string& path) {
    if (mkdir(path.c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH) == 0) {
        return true;
    }
    // Fix: a pre-existing directory is not a failure. Previously this returned
    // false on EEXIST, making create_temp_tar_gz() throw whenever the tmp dir
    // survived an earlier run or another archive was being processed.
    return errno == EEXIST;
}

/// Writes `content` into a unique "<data_dir>/tmp/archive_XXXXXX.tar.gz" file
/// and returns its path.
/// @throws std::runtime_error on any I/O failure.
std::string ArchiveUtils::create_temp_tar_gz(const std::string& content) {
    std::string tmp_dir = Config::get_instance().get_data_dir() + "/tmp";
    if (!create_directory(tmp_dir)) {
        throw std::runtime_error("Failed to create temporary directory: " + tmp_dir);
    }

    std::string temp_file_template = tmp_dir + "/archive_XXXXXX";
    std::vector<char> temp_filename(temp_file_template.begin(), temp_file_template.end());
    temp_filename.push_back('\0');

    // mkstemp() reserves a unique name; the .tar.gz suffix is then attached
    // via rename because mkstemp requires the template to end in "XXXXXX".
    int fd = mkstemp(temp_filename.data());
    if (fd == -1) {
        throw std::runtime_error("Failed to create temporary file");
    }

    std::string temp_file_path = std::string(temp_filename.data()) + TAR_GZ_EXTENSION;
    close(fd);

    if (std::rename(temp_filename.data(), temp_file_path.c_str()) != 0) {
        throw std::runtime_error("Failed to rename temporary file");
    }

    std::ofstream temp_file(temp_file_path, std::ios::binary);
    if (!temp_file) {
        throw std::runtime_error("Failed to open temporary file for writing");
    }

    temp_file.write(content.data(), content.size());
    if (!temp_file) {
        throw std::runtime_error("Failed to write content to temporary file");
    }

    return temp_file_path;
}

/// Deletes the temporary archive file and the "<data_dir>/tmp" directory.
/// @throws std::runtime_error if either removal fails (e.g. the tmp dir is
/// non-empty because another archive is being processed concurrently).
void ArchiveUtils::cleanup(const std::string& file_path) {
    if (std::remove(file_path.c_str()) != 0) {
        throw std::runtime_error("Failed to delete temporary file: " + file_path);
    }

    // Delete the temp directory
    std::string tmp_dir = Config::get_instance().get_data_dir() + "/tmp";
    if (rmdir(tmp_dir.c_str()) != 0) {
        throw std::runtime_error("Failed to delete temporary directory: " + tmp_dir);
    }
}

/// Structurally validates an in-memory .tar.gz blob by walking all entry
/// headers. No data is extracted.
bool ArchiveUtils::verify_tar_gz_archive(const std::string& archive_content) {
    struct archive* a = archive_read_new();
    if (!a) {
        return false;
    }

    bool is_valid = true;

    archive_read_support_format_all(a);
    archive_read_support_filter_all(a);

    if (archive_read_open_memory(a, archive_content.data(), archive_content.size()) != ARCHIVE_OK) {
        is_valid = false;
    }

    struct archive_entry* entry;
    while (is_valid) {
        int r = archive_read_next_header(a, &entry);
        if (r == ARCHIVE_EOF) {
            break;
        }
        if (r < ARCHIVE_WARN) {
            is_valid = false;
            break;
        }
    }

    archive_read_close(a);
    archive_read_free(a);

    return is_valid;
}
5,529
C++
.cpp
156
28.653846
120
0.614259
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,797
system_metrics.cpp
typesense_typesense/src/system_metrics.cpp
#include "system_metrics.h"
#include <sys/resource.h>
#include <sys/statvfs.h>
#if __linux__
#include <sys/types.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#elif __APPLE__
#include <unistd.h>
#include <mach/vm_statistics.h>
#include <mach/mach_types.h>
#include <mach/mach_init.h>
#include <mach/mach_host.h>
#endif
#include "string_utils.h"

#ifndef ASAN_BUILD
#include "jemalloc.h"

#if __APPLE__
#define impl_mallctl je_mallctl
#else
#define impl_mallctl mallctl
#endif
#endif

// Cache for get_memory_non_proc_bytes(): last refresh time (seconds since
// epoch) and the cached value.
uint64_t SystemMetrics::non_proc_mem_last_access = 0;
uint64_t SystemMetrics::non_proc_mem_bytes = 0;

/// Collects disk, memory and (on Linux) CPU/network metrics into `result`.
/// All numeric values are emitted as strings.
void SystemMetrics::get(const std::string &data_dir_path, nlohmann::json &result) {
    // DISK METRICS
    struct statvfs st{};
    statvfs(data_dir_path.c_str(), &st);
    uint64_t disk_total_bytes = st.f_blocks * st.f_frsize;
    uint64_t disk_used_bytes = (st.f_blocks - st.f_bavail) * st.f_frsize;
    result["system_disk_total_bytes"] = std::to_string(disk_total_bytes);
    result["system_disk_used_bytes"] = std::to_string(disk_used_bytes);

    // MEMORY METRICS
    // Fix: initialize every stat — in ASAN builds the mallctl calls below are
    // compiled out and `resident`/`metadata`/`mapped`/`retained` were
    // previously read uninitialized.
    size_t sz, active = 1, allocated = 1, resident = 0, metadata = 0, mapped = 0, retained = 0;
    sz = sizeof(size_t);
    uint64_t epoch = 1;

#ifndef ASAN_BUILD
    // See: http://jemalloc.net/jemalloc.3.html#stats.active
    impl_mallctl("thread.tcache.flush", nullptr, nullptr, nullptr, 0);
    // Bumping the epoch makes jemalloc refresh its cached statistics.
    impl_mallctl("epoch", &epoch, &sz, &epoch, sz);
    impl_mallctl("stats.active", &active, &sz, nullptr, 0);
    impl_mallctl("stats.allocated", &allocated, &sz, nullptr, 0);
    impl_mallctl("stats.resident", &resident, &sz, nullptr, 0);
    impl_mallctl("stats.metadata", &metadata, &sz, nullptr, 0);
    impl_mallctl("stats.mapped", &mapped, &sz, nullptr, 0);
    impl_mallctl("stats.retained", &retained, &sz, nullptr, 0);
#endif

    result["typesense_memory_active_bytes"] = std::to_string(active);
    result["typesense_memory_allocated_bytes"] = std::to_string(allocated);
    // Fix: this key previously reported `active` even though stats.resident
    // was fetched above and never used.
    result["typesense_memory_resident_bytes"] = std::to_string(resident);
    result["typesense_memory_metadata_bytes"] = std::to_string(metadata);
    result["typesense_memory_mapped_bytes"] = std::to_string(mapped);
    result["typesense_memory_retained_bytes"] = std::to_string(retained);

    // Fragmentation ratio is calculated very similar to how Redis does it:
    // https://github.com/redis/redis/blob/d6180c8c8674ffdae3d6efa5f946d85fe9163464/src/defrag.c#L900
    std::string frag_ratio = format_dp(1.0f - ((float)allocated / active));
    result["typesense_memory_fragmentation_ratio"] = frag_ratio;

    result["system_memory_total_bytes"] = std::to_string(get_memory_total_bytes());
    result["system_memory_used_bytes"] = std::to_string(get_memory_used_bytes());

#ifdef __linux__
    struct sysinfo sys_info;
    sysinfo(&sys_info);

    auto swap_used_bytes = sys_info.totalswap - sys_info.freeswap;
    result["system_memory_total_swap_bytes"] = std::to_string(sys_info.totalswap);
    result["system_memory_used_swap_bytes"] = std::to_string(swap_used_bytes);
#endif

    // CPU and Network metrics
#if __linux__
    const std::vector<cpu_stat_t>& cpu_stats = get_cpu_stats();

    // entry 0 gets no numeric suffix — presumably the all-core aggregate
    for(size_t i = 0; i < cpu_stats.size(); i++) {
        std::string cpu_id = (i == 0) ? "" : std::to_string(i);
        result["system_cpu" + cpu_id + "_active_percentage"] = cpu_stats[i].active;
    }

    uint64_t received_bytes, sent_bytes;
    linux_get_network_data("/proc/net/dev", received_bytes, sent_bytes);

    result["system_network_received_bytes"] = std::to_string(received_bytes);
    result["system_network_sent_bytes"] = std::to_string(sent_bytes);
#endif
}

/// Fraction of total system memory considered consumed: this process's
/// jemalloc-active bytes plus the periodically refreshed non-process usage.
float SystemMetrics::used_memory_ratio() {
    // non process memory bytes is updated only periodically since it's expensive
    uint64_t memory_consumed_bytes = get_memory_active_bytes() + get_memory_non_proc_bytes();
    uint64_t memory_total_bytes = get_memory_total_bytes();

    return ((float)memory_consumed_bytes / memory_total_bytes);
}

/// Parses the "MemAvailable" field out of /proc/meminfo (listed in kB there)
/// and returns it in bytes; returns 0 if the field is missing or unreadable.
uint64_t SystemMetrics::linux_get_mem_available_bytes() {
    std::string token;
    std::ifstream file("/proc/meminfo");

    while(file >> token) {
        if(token == "MemAvailable:") {
            uint64_t mem_kB;
            if(file >> mem_kB) {
                return mem_kB * 1024;
            } else {
                return 0;
            }
        }
    }

    return 0; // nothing found
}

/// jemalloc "stats.active" for this process; returns 1 in ASAN builds where
/// the mallctl calls are compiled out.
uint64_t SystemMetrics::get_memory_active_bytes() {
    size_t sz, memory_active_bytes = 1;
    sz = sizeof(size_t);
    uint64_t epoch = 1;

#ifndef ASAN_BUILD
    impl_mallctl("epoch", &epoch, &sz, &epoch, sz);
    impl_mallctl("stats.active", &memory_active_bytes, &sz, nullptr, 0);
#endif

    return memory_active_bytes;
}

/// System-wide used memory: active+wired pages on macOS, or
/// (total - MemAvailable) on Linux. Returns 0 on other platforms.
uint64_t SystemMetrics::get_memory_used_bytes() {
    uint64_t memory_used_bytes = 0;

#ifdef __APPLE__
    vm_size_t mach_page_size;
    mach_port_t mach_port;
    mach_msg_type_number_t count;
    vm_statistics64_data_t vm_stats;
    mach_port = mach_host_self();
    count = sizeof(vm_stats) / sizeof(natural_t);
    if (KERN_SUCCESS == host_page_size(mach_port, &mach_page_size) &&
        KERN_SUCCESS == host_statistics64(mach_port, HOST_VM_INFO, (host_info64_t)&vm_stats, &count)) {
        memory_used_bytes = ((int64_t)(vm_stats.active_count + vm_stats.wire_count) * (int64_t)mach_page_size);
    }
#elif __linux__
    memory_used_bytes = get_memory_total_bytes() - linux_get_mem_available_bytes();
#endif

    return memory_used_bytes;
}

/// Total physical memory of the machine (0 on unsupported platforms).
uint64_t SystemMetrics::get_memory_total_bytes() {
    uint64_t memory_total_bytes = 0;

#ifdef __APPLE__
    uint64_t pages = sysconf(_SC_PHYS_PAGES);
    uint64_t page_size = sysconf(_SC_PAGE_SIZE);
    memory_total_bytes = (pages * page_size);
#elif __linux__
    struct sysinfo sys_info;
    sysinfo(&sys_info);
    memory_total_bytes = sys_info.totalram;
#endif

    return memory_total_bytes;
}

/// Memory used by everything other than this process's jemalloc-active pool.
/// The value is expensive to compute, so it is refreshed at most once every
/// NON_PROC_MEM_UPDATE_INTERVAL_SECONDS and cached in between.
uint64_t SystemMetrics::get_memory_non_proc_bytes() {
    uint64_t now = std::chrono::duration_cast<std::chrono::seconds>(
            std::chrono::system_clock::now().time_since_epoch()).count();

    uint64_t seconds_since_last = (now - non_proc_mem_last_access);

    if(seconds_since_last > NON_PROC_MEM_UPDATE_INTERVAL_SECONDS) {
        uint64_t memory_used_bytes = get_memory_used_bytes();
        // NOTE(review): assumes jemalloc-active <= system used; these are
        // unsigned, so the subtraction would wrap otherwise.
        non_proc_mem_bytes = memory_used_bytes - get_memory_active_bytes();
        // Fix: advance the refresh timestamp only when a refresh actually
        // happened. Previously it advanced on every call, so polling more
        // often than the interval meant the cached value was never updated.
        non_proc_mem_last_access = now;
    }

    return non_proc_mem_bytes;
}

/// Reads total received/sent byte counters from a /proc/net/dev-style file.
/// Both outputs are set to 0 if no matching interface line is found.
void SystemMetrics::linux_get_network_data(const std::string & stat_path,
                                           uint64_t &received_bytes, uint64_t &sent_bytes) {
    //std::ifstream stat_file("/proc/net/dev");
    std::ifstream stat_file(stat_path);
    std::string line;

    // TODO: this probably needs to be handled better!
    const std::string STR_ENS5("ens5");
    const std::string STR_ETH0("eth0");

    /*
        Inter-|   Receive                                                |  Transmit
         face |bytes    packets errs drop fifo frame compressed multicast|bytes    packets errs drop fifo colls carrier compressed
          ens5: 324278716  897631    0    0    0     0          0         0 93933882  575535    0    0    0     0       0          0
    */

    received_bytes = 0;
    sent_bytes = 0;

    while (std::getline(stat_file, line)) {
        StringUtils::trim(line);
        if (line.rfind(STR_ENS5, 0) == 0 || line.rfind(STR_ETH0, 0) == 0) {
            std::istringstream ss(line);
            std::string throwaway;

            // read interface label
            ss >> throwaway;

            uint64_t stat_value;

            // read stats: column 0 is received bytes, column 8 is sent bytes
            for (int i = 0; i < NUM_NETWORK_STATS; i++) {
                ss >> stat_value;
                if(i == 0) {
                    received_bytes = stat_value;
                }
                if(i == 8) {
                    sent_bytes = stat_value;
                }
            }

            break;
        }
    }
}
8,050
C++
.cpp
192
35.921875
130
0.637062
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,798
posting.cpp
typesense_typesense/src/posting.cpp
#include "posting.h" #include "posting_list.h" int64_t compact_posting_list_t::upsert(const uint32_t id, const std::vector<uint32_t>& offsets) { return upsert(id, &offsets[0], offsets.size()); } int64_t compact_posting_list_t::upsert(const uint32_t id, const uint32_t* offsets, uint32_t num_offsets) { // format: num_offsets, offset1,..,offsetn, id1 | num_offsets, offset1,..,offsetn, id2 uint32_t last_id = (length == 0) ? 0 : id_offsets[length - 1]; int64_t extra_length_needed = 0; if(length == 0 || id > last_id) { extra_length_needed = (num_offsets + 2); if(length + extra_length_needed > capacity) { // enough storage should have been provided upstream return (length + extra_length_needed) - capacity; } // can just append to the end id_offsets[length++] = num_offsets; for(size_t i = 0; i < num_offsets; i++) { id_offsets[length+i] = offsets[i]; } length += num_offsets; id_offsets[length++] = id; ids_length++; } else { // locate position and shift contents to make space available int64_t i = 0; while(i < length) { size_t num_existing_offsets = id_offsets[i]; size_t existing_id = id_offsets[i + num_existing_offsets + 1]; if(existing_id == id) { extra_length_needed = (num_offsets - num_existing_offsets); if(extra_length_needed > 0) { if(length + extra_length_needed > capacity) { // enough storage should have been provided upstream return (length + extra_length_needed) - capacity; } // shift offsets to the right to make space int64_t shift_index = int64_t(length) + extra_length_needed - 1; while(shift_index >= i && (shift_index - extra_length_needed) >= 0) { id_offsets[shift_index] = id_offsets[shift_index - extra_length_needed]; shift_index--; } } else if(extra_length_needed < 0) { // shift offsets to the left to reduce space // [num_offsets][0][2][4][id] // [num_offsets][0][id] size_t offset_diff = (num_existing_offsets - num_offsets); size_t start_index = i + 1 + offset_diff; while(start_index < length - offset_diff) { id_offsets[start_index] = id_offsets[start_index + 
offset_diff]; start_index++; } } id_offsets[i] = num_offsets; for(size_t j = 0; j < num_offsets; j++) { id_offsets[i + 1 + j] = offsets[j]; } id_offsets[i+1+num_offsets] = id; break; } else if(existing_id > id) { extra_length_needed = (num_offsets + 2); if(length + extra_length_needed > capacity) { // enough storage should have been provided upstream return (length + extra_length_needed) - capacity; } // shift index [i..length-1] by `extra_length_needed` positions int64_t shift_index = length + extra_length_needed - 1; while((shift_index - extra_length_needed) >= 0 && shift_index >= i) { // [*1 1 4] [1 1 7] // [1 1 3] id_offsets[shift_index] = id_offsets[shift_index - extra_length_needed]; shift_index--; } // now store the new offsets in the shifted space id_offsets[i++] = num_offsets; for (size_t j = 0; j < num_offsets; j++) { id_offsets[i+j] = offsets[j]; } i += num_offsets; id_offsets[i++] = id; ids_length++; break; } i += num_existing_offsets + 2; } length += extra_length_needed; // extra_length_needed can be negative here but that's okay } return 0; } void compact_posting_list_t::erase(const uint32_t id) { // locate position and shift contents to collapse space vacated size_t i = 0; while(i < length) { size_t num_existing_offsets = id_offsets[i]; size_t existing_id = id_offsets[i + num_existing_offsets + 1]; if(existing_id > id) { // not found! 
return ; } if(existing_id == id) { size_t shift_offset = num_existing_offsets + 2; while(i+shift_offset < length) { id_offsets[i] = id_offsets[i+shift_offset]; i++; } length -= shift_offset; ids_length--; break; } i += num_existing_offsets + 2; } } compact_posting_list_t* compact_posting_list_t::create(uint32_t num_ids, const uint32_t* ids, const uint32_t* offset_index, uint32_t num_offsets, const uint32_t* offsets) { // format: num_offsets, offset1,..,offsetn, id1 | num_offsets, offset1,..,offsetn, id2 size_t length_required = num_offsets + (2 * num_ids); compact_posting_list_t* pl = (compact_posting_list_t*) malloc(sizeof(compact_posting_list_t) + (length_required * sizeof(uint32_t))); pl->length = 0; pl->capacity = length_required; pl->ids_length = 0; for(size_t i = 0; i < num_ids; i++) { uint32_t start_offset = offset_index[i]; uint32_t next_start_offset = (i == num_ids-1) ? num_offsets : offset_index[i+1]; pl->upsert(ids[i], offsets+start_offset, (next_start_offset - start_offset)); } return pl; } posting_list_t* compact_posting_list_t::to_full_posting_list() const { posting_list_t* pl = new posting_list_t(posting_t::MAX_BLOCK_ELEMENTS); size_t i = 0; while(i < length) { size_t num_existing_offsets = id_offsets[i]; i++; std::vector<uint32_t> offsets(num_existing_offsets); for(size_t j = 0; j < num_existing_offsets; j++) { auto offset = id_offsets[i + j]; offsets[j] = offset; } size_t existing_id = id_offsets[i + num_existing_offsets]; pl->upsert(existing_id, offsets); i += num_existing_offsets + 1; } return pl; } uint32_t compact_posting_list_t::last_id() { return (length == 0) ? 
UINT32_MAX : id_offsets[length - 1]; } uint32_t compact_posting_list_t::num_ids() const { return ids_length; } uint32_t compact_posting_list_t::first_id() { if(length == 0) { return 0; } return id_offsets[id_offsets[0] + 1]; } bool compact_posting_list_t::contains(uint32_t id) { size_t i = 0; while(i < length) { size_t num_existing_offsets = id_offsets[i]; size_t existing_id = id_offsets[i + num_existing_offsets + 1]; if(existing_id > id) { // not found! return false; } if(existing_id == id) { return true; } i += num_existing_offsets + 2; } return false; } bool compact_posting_list_t::contains_atleast_one(const uint32_t* target_ids, size_t target_ids_size) { size_t i = 0; size_t target_ids_index = 0; while(i < length && target_ids_index < target_ids_size) { size_t num_existing_offsets = id_offsets[i]; size_t existing_id = id_offsets[i + num_existing_offsets + 1]; // Returns iterator to the first element that is >= to value or last if no such element is found. size_t found_index = std::lower_bound(target_ids + target_ids_index, target_ids + target_ids_size, existing_id) - target_ids; if(found_index == target_ids_size) { // all elements are lesser than lowest value (existing_id), so we can stop looking return false; } else { if(target_ids[found_index] == existing_id) { return true; } // adjust lower bound to found_index+1 whose value is >= `existing_id` target_ids_index = found_index; } i += num_existing_offsets + 2; } return false; } /* posting operations */ void posting_t::upsert(void*& obj, uint32_t id, const std::vector<uint32_t>& offsets) { if(IS_COMPACT_POSTING(obj)) { compact_posting_list_t* list = (compact_posting_list_t*) RAW_POSTING_PTR(obj); int64_t extra_capacity_required = list->upsert(id, offsets); if(extra_capacity_required == 0) { // upsert succeeded return; } if((list->capacity + extra_capacity_required) > COMPACT_LIST_THRESHOLD_LENGTH) { // we have to convert to a full posting list posting_list_t* full_list = list->to_full_posting_list(); 
free(list); obj = full_list; } else { // grow the container by 30% size_t new_capacity = std::min<size_t>((list->capacity + extra_capacity_required) * 1.3, COMPACT_LIST_THRESHOLD_LENGTH); size_t new_capacity_bytes = sizeof(compact_posting_list_t) + (new_capacity * sizeof(uint32_t)); auto new_list = (compact_posting_list_t *) realloc(list, new_capacity_bytes); if(new_list == nullptr) { abort(); } list = new_list; list->capacity = new_capacity; obj = SET_COMPACT_POSTING(list); list->upsert(id, offsets); return ; } } // either `obj` is already a full list or was converted to a full list above posting_list_t* list = (posting_list_t*)(obj); list->upsert(id, offsets); } void posting_t::erase(void*& obj, uint32_t id) { if(IS_COMPACT_POSTING(obj)) { compact_posting_list_t* list = COMPACT_POSTING_PTR(obj); list->erase(id); // if the list becomes too small, we resize it to save memory if(list->length < list->capacity/2) { // resize container size_t new_capacity = list->capacity/2; size_t new_capacity_bytes = sizeof(compact_posting_list_t) + (new_capacity * sizeof(uint32_t)); auto new_list = (compact_posting_list_t *) realloc(list, new_capacity_bytes); if(new_list == nullptr) { abort(); } list = new_list; list->capacity = new_capacity; obj = SET_COMPACT_POSTING(list); } } else { posting_list_t* list = (posting_list_t*)(obj); list->erase(id); if(list->num_blocks() == 1 && ((2 * list->get_root()->size()) + list->get_root()->offsets.getLength()) <= COMPACT_LIST_THRESHOLD_LENGTH) { // convert to compact posting format auto root_block = list->get_root(); auto ids = root_block->ids.uncompress(); auto offset_index = root_block->offset_index.uncompress(); auto offsets = root_block->offsets.uncompress(); compact_posting_list_t* compact_list = compact_posting_list_t::create( root_block->size(), ids, offset_index, root_block->offsets.getLength(), offsets ); delete [] ids; delete [] offset_index; delete [] offsets; delete list; obj = SET_COMPACT_POSTING(compact_list); } } } uint32_t 
posting_t::num_ids(const void* obj) { if(IS_COMPACT_POSTING(obj)) { compact_posting_list_t* list = COMPACT_POSTING_PTR(obj); return list->num_ids(); } else { posting_list_t* list = (posting_list_t*)(obj); return list->num_ids(); } } uint32_t posting_t::first_id(const void* obj) { if(IS_COMPACT_POSTING(obj)) { compact_posting_list_t* list = COMPACT_POSTING_PTR(obj); return list->first_id(); } else { posting_list_t* list = (posting_list_t*)(obj); return list->first_id(); } } bool posting_t::contains(const void* obj, uint32_t id) { if(IS_COMPACT_POSTING(obj)) { compact_posting_list_t* list = COMPACT_POSTING_PTR(obj); return list->contains(id); } else { posting_list_t* list = (posting_list_t*)(obj); return list->contains(id); } } bool posting_t::contains_atleast_one(const void* obj, const uint32_t* target_ids, size_t target_ids_size) { if(IS_COMPACT_POSTING(obj)) { compact_posting_list_t* list = COMPACT_POSTING_PTR(obj); return list->contains_atleast_one(target_ids, target_ids_size); } else { posting_list_t* list = (posting_list_t*)(obj); return list->contains_atleast_one(target_ids, target_ids_size); } } void posting_t::merge(const std::vector<void*>& raw_posting_lists, std::vector<uint32_t>& result_ids) { // we will have to convert the compact posting list (if any) to full form std::vector<posting_list_t*> plists; std::vector<posting_list_t*> expanded_plists; to_expanded_plists(raw_posting_lists, plists, expanded_plists); posting_list_t::merge(plists, result_ids); for(posting_list_t* expanded_plist: expanded_plists) { delete expanded_plist; } } void posting_t::intersect(const std::vector<void*>& raw_posting_lists, std::vector<uint32_t>& result_ids, const uint32_t& context_ids_length, const uint32_t* context_ids) { if (context_ids_length != 0) { if (raw_posting_lists.empty()) { return; } for (uint32_t i = 0; i < context_ids_length; i++) { bool is_present = true; for (auto const& raw_posting_list: raw_posting_lists) { if (!contains(raw_posting_list, context_ids[i])) { 
is_present = false; break; } } if (is_present) { result_ids.push_back(context_ids[i]); } } return; } // we will have to convert the compact posting list (if any) to full form std::vector<posting_list_t*> plists; std::vector<posting_list_t*> expanded_plists; to_expanded_plists(raw_posting_lists, plists, expanded_plists); posting_list_t::intersect(plists, result_ids); for(auto expanded_plist: expanded_plists) { delete expanded_plist; } } void posting_t::to_expanded_plists(const std::vector<void*>& raw_posting_lists, std::vector<posting_list_t*>& plists, std::vector<posting_list_t*>& expanded_plists) { for(size_t i = 0; i < raw_posting_lists.size(); i++) { auto raw_posting_list = raw_posting_lists[i]; if(IS_COMPACT_POSTING(raw_posting_list)) { auto compact_posting_list = COMPACT_POSTING_PTR(raw_posting_list); posting_list_t* full_posting_list = compact_posting_list->to_full_posting_list(); plists.emplace_back(full_posting_list); expanded_plists.push_back(full_posting_list); } else { posting_list_t* full_posting_list = (posting_list_t*)(raw_posting_list); plists.emplace_back(full_posting_list); } } } void posting_t::destroy_list(void*& obj) { if(obj == nullptr) { return; } if(IS_COMPACT_POSTING(obj)) { compact_posting_list_t* list = COMPACT_POSTING_PTR(obj); free(list); // assigned via malloc, so must be free()d } else { posting_list_t* list = (posting_list_t*)(obj); delete list; } obj = nullptr; } void posting_t::get_array_token_positions(uint32_t id, const std::vector<void*>& raw_posting_lists, std::map<size_t, std::vector<token_positions_t>>& array_token_positions) { std::vector<posting_list_t*> plists; std::vector<posting_list_t*> expanded_plists; to_expanded_plists(raw_posting_lists, plists, expanded_plists); std::vector<posting_list_t::iterator_t> its; for(posting_list_t* pl: plists) { its.push_back(pl->new_iterator()); its.back().skip_to(id); if(!its.back().valid() || its.back().id() != id) { its.pop_back(); } } if(!its.empty()) { 
posting_list_t::get_offsets(its, array_token_positions); } for(posting_list_t* expanded_plist: expanded_plists) { delete expanded_plist; } } void posting_t::get_exact_matches(const std::vector<void*>& raw_posting_lists, const bool field_is_array, const uint32_t* ids, const uint32_t num_ids, uint32_t*& exact_ids, size_t& num_exact_ids) { std::vector<posting_list_t*> plists; std::vector<posting_list_t*> expanded_plists; to_expanded_plists(raw_posting_lists, plists, expanded_plists); std::vector<posting_list_t::iterator_t> its; for(posting_list_t* pl: plists) { its.push_back(pl->new_iterator()); } posting_list_t::get_exact_matches(its, field_is_array, ids, num_ids, exact_ids, num_exact_ids); for(posting_list_t* expanded_plist: expanded_plists) { delete expanded_plist; } } void posting_t::get_matching_array_indices(const std::vector<void*>& raw_posting_lists, uint32_t id, std::vector<size_t>& indices) { std::vector<posting_list_t*> plists; std::vector<posting_list_t*> expanded_plists; to_expanded_plists(raw_posting_lists, plists, expanded_plists); std::vector<posting_list_t::iterator_t> its; for(posting_list_t* pl: plists) { its.push_back(pl->new_iterator()); } posting_list_t::get_matching_array_indices(id, its, indices); for(posting_list_t* expanded_plist: expanded_plists) { delete expanded_plist; } } void posting_t::get_or_iterator(void*& raw_posting_list, std::vector<or_iterator_t>& or_iterators, std::vector<posting_list_t*>& expanded_plists) { if(IS_COMPACT_POSTING(raw_posting_list)) { auto compact_posting_list = COMPACT_POSTING_PTR(raw_posting_list); posting_list_t* full_posting_list = compact_posting_list->to_full_posting_list(); expanded_plists.emplace_back(full_posting_list); std::vector<posting_list_t::iterator_t> its; its.push_back(full_posting_list->new_iterator(nullptr, nullptr, 0)); or_iterators.emplace_back(or_iterator_t(its)); } else { posting_list_t* full_posting_list = (posting_list_t*)(raw_posting_list); std::vector<posting_list_t::iterator_t> its; 
its.push_back(full_posting_list->new_iterator(nullptr, nullptr, 0)); or_iterators.emplace_back(or_iterator_t(its)); } } void posting_t::get_phrase_matches(const std::vector<void*>& raw_posting_lists, bool field_is_array, const uint32_t* ids, uint32_t num_ids, uint32_t*& phrase_ids, size_t& num_phrase_ids) { std::vector<posting_list_t*> plists; std::vector<posting_list_t*> expanded_plists; to_expanded_plists(raw_posting_lists, plists, expanded_plists); std::vector<posting_list_t::iterator_t> its; for(posting_list_t* pl: plists) { its.push_back(pl->new_iterator()); } posting_list_t::get_phrase_matches(its, field_is_array, ids, num_ids, phrase_ids, num_phrase_ids); for(posting_list_t* expanded_plist: expanded_plists) { delete expanded_plist; } }
19,881
C++
.cpp
452
33.736726
146
0.572205
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,799
typesense_server_utils.cpp
typesense_typesense/src/typesense_server_utils.cpp
#include <cstdlib> #include <curl/curl.h> #include <gflags/gflags.h> #include <dlfcn.h> #include <brpc/controller.h> #include <brpc/server.h> #include <braft/raft.h> #include <raft_server.h> #include <fstream> #include <execinfo.h> #include <http_client.h> #include <arpa/inet.h> #include <sys/socket.h> #include <ifaddrs.h> #include <butil/files/file_enumerator.h> #include "analytics_manager.h" #include "housekeeper.h" #include "core_api.h" #include "ratelimit_manager.h" #include "embedder_manager.h" #include "typesense_server_utils.h" #include "threadpool.h" #include "stopwords_manager.h" #include "conversation_manager.h" #include "vq_model_manager.h" #ifndef ASAN_BUILD #include "jemalloc.h" #endif #include "stackprinter.h" HttpServer* server; std::atomic<bool> quit_raft_service; extern "C" { // weak symbol: resolved at runtime by the linker if we are using jemalloc, nullptr otherwise #ifdef __APPLE__ int je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen) __attribute__((weak_import)); #else int mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen) __attribute__((weak)); #endif } bool using_jemalloc() { // On OSX, jemalloc API is prefixed with "je_" #ifdef __APPLE__ return (je_mallctl != nullptr); #else return (mallctl != nullptr); #endif } void catch_interrupt(int sig) { LOG(INFO) << "Stopping Typesense server..."; signal(sig, SIG_IGN); // ignore for now as we want to shut down elegantly quit_raft_service = true; } void init_cmdline_options(cmdline::parser & options, int argc, char **argv) { options.set_program_name("./typesense-server"); options.add<std::string>("data-dir", 'd', "Directory where data will be stored.", true); options.add<std::string>("api-key", 'a', "API key that allows all operations.", true); options.add<std::string>("search-only-api-key", 's', "[DEPRECATED: use API key management end-point] API key that allows only searches.", false); options.add<std::string>("health-rusage-api-key", 
'\0', "API key that allows access to health end-point with resource usage.", false); options.add<std::string>("analytics-dir", '\0', "Directory where Analytics will be stored.", false); options.add<uint32_t>("analytics-db-ttl", '\0', "TTL in seconds for events stored in analytics db", false); options.add<uint32_t>("analytics-minute-rate-limit", '\0', "per minute rate limit for /events endpoint", false); options.add<std::string>("api-address", '\0', "Address to which Typesense API service binds.", false, "0.0.0.0"); options.add<uint32_t>("api-port", '\0', "Port on which Typesense API service listens.", false, 8108); options.add<std::string>("peering-address", '\0', "Internal IP address to which Typesense peering service binds.", false, ""); options.add<uint32_t>("peering-port", '\0', "Port on which Typesense peering service listens.", false, 8107); options.add<std::string>("peering-subnet", '\0', "Internal subnet that Typesense should use for peering.", false, ""); options.add<std::string>("nodes", '\0', "Path to file containing comma separated string of all nodes in the cluster.", false); options.add<std::string>("ssl-certificate", 'c', "Path to the SSL certificate file.", false, ""); options.add<std::string>("ssl-certificate-key", 'k', "Path to the SSL certificate key file.", false, ""); options.add<uint32_t>("ssl-refresh-interval-seconds", '\0', "Frequency of automatic reloading of SSL certs from disk.", false, 8 * 60 * 60); options.add<bool>("enable-cors", '\0', "Enable CORS requests.", false, true); options.add<std::string>("cors-domains", '\0', "Comma separated list of domains that are allowed for CORS.", false, ""); options.add<float>("max-memory-ratio", '\0', "Maximum fraction of system memory to be used.", false, 1.0f); options.add<int>("snapshot-interval-seconds", '\0', "Frequency of replication log snapshots.", false, 3600); options.add<int>("snapshot-max-byte-count-per-rpc", '\0', "Maximum snapshot file size in bytes transferred for each RPC.", false, 
4194304); options.add<size_t>("healthy-read-lag", '\0', "Reads are rejected if the updates lag behind this threshold.", false, 1000); options.add<size_t>("healthy-write-lag", '\0', "Writes are rejected if the updates lag behind this threshold.", false, 500); options.add<int>("log-slow-requests-time-ms", '\0', "When >= 0, requests that take longer than this duration are logged.", false, -1); options.add<uint32_t>("num-collections-parallel-load", '\0', "Number of collections that are loaded in parallel during start up.", false, 4); options.add<uint32_t>("num-documents-parallel-load", '\0', "Number of documents per collection that are indexed in parallel during start up.", false, 1000); options.add<uint32_t>("thread-pool-size", '\0', "Number of threads used for handling concurrent requests.", false, 4); options.add<std::string>("log-dir", '\0', "Path to the log directory.", false, ""); options.add<std::string>("config", '\0', "Path to the configuration file.", false, ""); options.add<bool>("enable-access-logging", '\0', "Enable access logging.", false, false); options.add<bool>("enable-search-logging", '\0', "Enable search logging.", false, false); options.add<bool>("enable-search-analytics", '\0', "Enable search analytics.", false, false); options.add<int>("disk-used-max-percentage", '\0', "Reject writes when used disk space exceeds this percentage. Default: 100 (never reject).", false, 100); options.add<int>("memory-used-max-percentage", '\0', "Reject writes when memory usage exceeds this percentage. Default: 100 (never reject).", false, 100); options.add<bool>("skip-writes", '\0', "Skip all writes except config changes. Default: false.", false, false); options.add<bool>("reset-peers-on-error", '\0', "Reset node's peers on clustering error. 
Default: false.", false, false); options.add<int>("log-slow-searches-time-ms", '\0', "When >= 0, searches that take longer than this duration are logged.", false, 30*1000); options.add<int>("cache-num-entries", '\0', "Number of entries to cache.", false, 1000); options.add<uint32_t>("analytics-flush-interval", '\0', "Frequency of persisting analytics data to disk (in seconds).", false, 3600); options.add<uint32_t>("housekeeping-interval", '\0', "Frequency of housekeeping background job (in seconds).", false, 1800); options.add<bool>("enable-lazy-filter", '\0', "Filter clause will be evaluated lazily.", false, false); options.add<uint32_t>("db-compaction-interval", '\0', "Frequency of RocksDB compaction (in seconds).", false, 604800); options.add<uint16_t>("filter-by-max-ops", '\0', "Maximum number of operations permitted in filtery_by.", false, Config::FILTER_BY_DEFAULT_OPERATIONS); options.add<int>("max-per-page", '\0', "Max number of hits per page", false, 250); // DEPRECATED options.add<std::string>("listen-address", 'h', "[DEPRECATED: use `api-address`] Address to which Typesense API service binds.", false, "0.0.0.0"); options.add<uint32_t>("listen-port", 'p', "[DEPRECATED: use `api-port`] Port on which Typesense API service listens.", false, 8108); options.add<std::string>("master", 'm', "[DEPRECATED: use clustering via --nodes] Master's address in http(s)://<master_address>:<master_port> format " "to start as read-only replica.", false, ""); } int init_root_logger(Config & config, const std::string & server_version) { google::InitGoogleLogging("typesense"); std::string log_dir = config.get_log_dir(); if(log_dir.empty()) { // use console logger if log dir is not specified FLAGS_logtostderr = true; } else { if(!directory_exists(log_dir)) { std::cerr << "Typesense failed to start. 
" << "Log directory " << log_dir << " does not exist."; return 1; } // flush log levels above -1 immediately (INFO=0) FLAGS_logbuflevel = -1; // available only on glog master (ensures that log file name is constant) FLAGS_timestamp_in_logfile_name = false; std::string log_path = log_dir + "/" + "typesense.log"; // will log levels INFO **and above** to the given log file google::SetLogDestination(google::INFO, log_path.c_str()); // don't create symlink for INFO log google::SetLogSymlink(google::INFO, ""); // don't create separate log files for each level google::SetLogDestination(google::WARNING, ""); google::SetLogDestination(google::ERROR, ""); google::SetLogDestination(google::FATAL, ""); std::cout << "Log directory is configured as: " << log_dir << std::endl; } return 0; } bool is_private_ip(uint32_t ip) { uint8_t b1, b2; b1 = (uint8_t) (ip >> 24); b2 = (uint8_t) ((ip >> 16) & 0x0ff); // 10.x.y.z if (b1 == 10) { return true; } // 172.16.0.0 - 172.31.255.255 if ((b1 == 172) && (b2 >= 16) && (b2 <= 31)) { return true; } // 192.168.0.0 - 192.168.255.255 if ((b1 == 192) && (b2 == 168)) { return true; } return false; } const char* get_internal_ip(const std::string& subnet_cidr) { struct ifaddrs *ifap; getifaddrs(&ifap); uint32_t netip = 0, netbits = 0; if(!subnet_cidr.empty()) { std::vector<std::string> subnet_parts; StringUtils::split(subnet_cidr, subnet_parts, "/"); if(subnet_parts.size() == 2) { butil::ip_t subnet_addr; auto res = butil::str2ip(subnet_parts[0].c_str(), &subnet_addr); if(res == 0) { netip = subnet_addr.s_addr; if(StringUtils::is_uint32_t(subnet_parts[1])) { netbits = std::stoll(subnet_parts[1]); } } } } if(netip != 0 && netbits != 0) { LOG(INFO) << "Using subnet ip: " << netip << ", bits: " << netbits; } for(auto ifa = ifap; ifa; ifa = ifa->ifa_next) { if (ifa->ifa_addr && ifa->ifa_addr->sa_family==AF_INET) { auto sa = (struct sockaddr_in *) ifa->ifa_addr; auto ipaddr = sa->sin_addr.s_addr; if(is_private_ip(ntohl(ipaddr))) { if(netip != 0 && 
netbits != 0) { unsigned int mask = 0xFFFFFFFF << (32 - netbits); if((ntohl(netip) & mask) != (ntohl(ipaddr) & mask)) { LOG(INFO) << "Skipping interface " << ifa->ifa_name << " as it does not match peering subnet."; continue; } } char *ip = inet_ntoa(sa->sin_addr); freeifaddrs(ifap); return ip; } } } LOG(WARNING) << "Found no matching interfaces, using loopback address as internal IP."; freeifaddrs(ifap); return "127.0.0.1"; } int start_raft_server(ReplicationState& replication_state, Store& store, const std::string& state_dir, const std::string& path_to_nodes, const std::string& peering_address, uint32_t peering_port, const std::string& peering_subnet, uint32_t api_port, int snapshot_interval_seconds, int snapshot_max_byte_count_per_rpc, const std::atomic<bool>& reset_peers_on_error) { if(path_to_nodes.empty()) { LOG(INFO) << "Since no --nodes argument is provided, starting a single node Typesense cluster."; } const Option<std::string>& nodes_config_op = Config::fetch_nodes_config(path_to_nodes); if(!nodes_config_op.ok()) { LOG(ERROR) << nodes_config_op.error(); return -1; } butil::ip_t peering_ip; int ip_conv_status = 0; if(!peering_address.empty()) { ip_conv_status = butil::str2ip(peering_address.c_str(), &peering_ip); } else { const char* internal_ip = get_internal_ip(peering_subnet); ip_conv_status = butil::str2ip(internal_ip, &peering_ip); } if(ip_conv_status != 0) { LOG(ERROR) << "Failed to parse peering address `" << peering_address << "`"; return -1; } butil::EndPoint peering_endpoint(peering_ip, peering_port); // start peering server brpc::Server raft_server; if (braft::add_service(&raft_server, peering_endpoint) != 0) { LOG(ERROR) << "Failed to add peering service"; exit(-1); } if (raft_server.Start(peering_endpoint, nullptr) != 0) { LOG(ERROR) << "Failed to start peering service"; exit(-1); } size_t election_timeout_ms = 5000; if (replication_state.start(peering_endpoint, api_port, election_timeout_ms, snapshot_max_byte_count_per_rpc, state_dir, 
nodes_config_op.get(), quit_raft_service) != 0) { LOG(ERROR) << "Failed to start peering state"; exit(-1); } LOG(INFO) << "Typesense peering service is running on " << raft_server.listen_address(); LOG(INFO) << "Snapshot interval configured as: " << snapshot_interval_seconds << "s"; LOG(INFO) << "Snapshot max byte count configured as: " << snapshot_max_byte_count_per_rpc; // Wait until 'CTRL-C' is pressed. then Stop() and Join() the service size_t raft_counter = 0; while (!brpc::IsAskedToQuit() && !quit_raft_service.load()) { if(raft_counter % 10 == 0) { // reset peer configuration periodically to identify change in cluster membership const Option<std::string> & refreshed_nodes_op = Config::fetch_nodes_config(path_to_nodes); if(!refreshed_nodes_op.ok()) { LOG(WARNING) << "Error while refreshing peer configuration: " << refreshed_nodes_op.error(); } else { const std::string& nodes_config = ReplicationState::to_nodes_config(peering_endpoint, api_port, refreshed_nodes_op.get()); replication_state.refresh_nodes(nodes_config, raft_counter, reset_peers_on_error); if(raft_counter % 60 == 0) { replication_state.do_snapshot(nodes_config); } } } if(raft_counter % 3 == 0) { // update node catch up status periodically, take care of logging too verbosely bool log_msg = (raft_counter % 9 == 0); replication_state.refresh_catchup_status(log_msg); } raft_counter++; sleep(1); } LOG(INFO) << "Typesense peering service is going to quit."; // Stop application before server replication_state.shutdown(); LOG(INFO) << "raft_server.stop()"; raft_server.Stop(0); LOG(INFO) << "raft_server.join()"; raft_server.Join(); LOG(INFO) << "Typesense peering service has quit."; return 0; } int run_server(const Config & config, const std::string & version, void (*master_server_routes)()) { LOG(INFO) << "Starting Typesense " << version << std::flush; #ifndef ASAN_BUILD if(using_jemalloc()) { LOG(INFO) << "Typesense is using jemalloc."; // Due to time based decay depending on application not being 
idle-ish, set `background_thread` // to help with releasing memory back to the OS and improve tail latency. // See: https://github.com/jemalloc/jemalloc/issues/1398 bool background_thread = true; #ifdef __APPLE__ je_mallctl("background_thread", nullptr, nullptr, &background_thread, sizeof(bool)); #elif __linux__ mallctl("background_thread", nullptr, nullptr, &background_thread, sizeof(bool)); #endif } else { LOG(WARNING) << "Typesense is NOT using jemalloc."; } #endif quit_raft_service = false; if(!directory_exists(config.get_data_dir())) { LOG(ERROR) << "Typesense failed to start. " << "Data directory " << config.get_data_dir() << " does not exist."; return 1; } if (config.get_enable_search_analytics() && !config.get_analytics_dir().empty() && !directory_exists(config.get_analytics_dir())) { LOG(INFO) << "Analytics directory " << config.get_analytics_dir() << " does not exist, will create it..."; if(!create_directory(config.get_analytics_dir())) { LOG(ERROR) << "Could not create analytics directory. Quitting."; return 1; } } if(!config.get_master().empty()) { LOG(ERROR) << "The --master option has been deprecated. Please use clustering for high availability. " << "Look for the --nodes configuration in the documentation."; return 1; } if(!config.get_search_only_api_key().empty()) { LOG(WARNING) << "!!!! WARNING !!!!"; LOG(WARNING) << "The --search-only-api-key has been deprecated. 
" "The API key generation end-point should be used for generating keys with specific ACL."; } std::string data_dir = config.get_data_dir(); std::string db_dir = config.get_data_dir() + "/db"; std::string state_dir = config.get_data_dir() + "/state"; std::string meta_dir = config.get_data_dir() + "/meta"; std::string analytics_dir = config.get_analytics_dir(); int32_t analytics_db_ttl = config.get_analytics_db_ttl(); uint32_t analytics_minute_rate_limit = config.get_analytics_minute_rate_limit(); size_t thread_pool_size = config.get_thread_pool_size(); const size_t proc_count = std::max<size_t>(1, std::thread::hardware_concurrency()); const size_t num_threads = thread_pool_size == 0 ? (proc_count * 8) : thread_pool_size; size_t num_collections_parallel_load = config.get_num_collections_parallel_load(); num_collections_parallel_load = (num_collections_parallel_load == 0) ? (proc_count * 4) : num_collections_parallel_load; LOG(INFO) << "Thread pool size: " << num_threads; ThreadPool app_thread_pool(num_threads); ThreadPool server_thread_pool(num_threads); ThreadPool replication_thread_pool(num_threads); // primary DB used for storing the documents: we will not use WAL since Raft provides that Store store(db_dir, 24*60*60, 1024, true); // meta DB for storing house keeping things Store meta_store(meta_dir, 24*60*60, 1024, false); Store* analytics_store = nullptr; if(!analytics_dir.empty()) { // Analytics DB for storing analytics events // We want to keep rocksdb files inside a `db` directory inside `analytics_dir`. 
// Need to handle missing db subdir from older versions by creating and moving files inside std::string analytics_db_dir = analytics_dir + "/db"; if(!directory_exists(analytics_db_dir)) { create_directory(analytics_db_dir); butil::FileEnumerator analytics_dir_enum(butil::FilePath(analytics_dir), false, butil::FileEnumerator::FILES); for (butil::FilePath file = analytics_dir_enum.Next(); !file.empty(); file = analytics_dir_enum.Next()) { butil::FilePath dest_path(analytics_db_dir + "/" + file.BaseName().value()); butil::Move(file, dest_path); } } analytics_store = new Store(analytics_db_dir, 24*60*60, 1024, true, analytics_db_ttl); } AnalyticsManager::get_instance().init(&store, analytics_store, analytics_minute_rate_limit); curl_global_init(CURL_GLOBAL_SSL); HttpClient & httpClient = HttpClient::get_instance(); httpClient.init(config.get_api_key()); server = new HttpServer( version, config.get_api_address(), config.get_api_port(), config.get_ssl_cert(), config.get_ssl_cert_key(), config.get_ssl_refresh_interval_seconds() * 1000, config.get_enable_cors(), config.get_cors_domains(), &server_thread_pool ); server->set_auth_handler(handle_authentication); server->on(HttpServer::STREAM_RESPONSE_MESSAGE, HttpServer::on_stream_response_message); server->on(HttpServer::REQUEST_PROCEED_MESSAGE, HttpServer::on_request_proceed_message); server->on(HttpServer::DEFER_PROCESSING_MESSAGE, HttpServer::on_deferred_processing_message); bool ssl_enabled = (!config.get_ssl_cert().empty() && !config.get_ssl_cert_key().empty()); BatchedIndexer* batch_indexer = new BatchedIndexer(server, &store, &meta_store, num_threads, config, config.get_skip_writes()); CollectionManager & collectionManager = CollectionManager::get_instance(); collectionManager.init(&store, &app_thread_pool, config.get_max_memory_ratio(), config.get_api_key(), quit_raft_service, config.get_filter_by_max_ops()); StopwordsManager& stopwordsManager = StopwordsManager::get_instance(); stopwordsManager.init(&store); 
RateLimitManager *rateLimitManager = RateLimitManager::getInstance(); auto rate_limit_manager_init = rateLimitManager->init(&meta_store); if(!rate_limit_manager_init.ok()) { LOG(INFO) << "Failed to initialize rate limit manager: " << rate_limit_manager_init.error(); } EmbedderManager::set_model_dir(config.get_data_dir() + "/models"); // first we start the peering service ReplicationState replication_state(server, batch_indexer, &store, analytics_store, &replication_thread_pool, server->get_message_dispatcher(), ssl_enabled, &config, num_collections_parallel_load, config.get_num_documents_parallel_load()); auto conversations_init = ConversationManager::get_instance().init(&replication_state); if(!conversations_init.ok()) { LOG(INFO) << "Failed to initialize conversation manager: " << conversations_init.error(); } std::thread raft_thread([&replication_state, &store, &config, &state_dir, &app_thread_pool, &server_thread_pool, &replication_thread_pool, batch_indexer]() { std::thread batch_indexing_thread([batch_indexer]() { batch_indexer->run(); }); std::thread event_sink_thread([&replication_state]() { AnalyticsManager::get_instance().run(&replication_state); }); std::thread conversation_garbage_collector_thread([]() { LOG(INFO) << "Conversation garbage collector thread started."; ConversationManager::get_instance().run(); }); HouseKeeper::get_instance().init(); std::thread housekeeping_thread([]() { HouseKeeper::get_instance().run(); }); RemoteEmbedder::init(&replication_state); std::string path_to_nodes = config.get_nodes(); start_raft_server(replication_state, store, state_dir, path_to_nodes, config.get_peering_address(), config.get_peering_port(), config.get_peering_subnet(), config.get_api_port(), config.get_snapshot_interval_seconds(), config.get_snapshot_max_byte_count_per_rpc(), config.get_reset_peers_on_error()); LOG(INFO) << "Shutting down batch indexer..."; batch_indexer->stop(); LOG(INFO) << "Waiting for batch indexing thread to be done..."; 
batch_indexing_thread.join(); LOG(INFO) << "Shutting down event sink thread..."; AnalyticsManager::get_instance().stop(); LOG(INFO) << "Waiting for event sink thread to be done..."; event_sink_thread.join(); LOG(INFO) << "Shutting down conversation garbage collector thread..."; ConversationManager::get_instance().stop(); LOG(INFO) << "Waiting for conversation garbage collector thread to be done..."; conversation_garbage_collector_thread.join(); LOG(INFO) << "Waiting for housekeeping thread to be done..."; HouseKeeper::get_instance().stop(); housekeeping_thread.join(); LOG(INFO) << "Shutting down server_thread_pool"; server_thread_pool.shutdown(); LOG(INFO) << "Shutting down app_thread_pool."; app_thread_pool.shutdown(); LOG(INFO) << "Shutting down replication_thread_pool."; replication_thread_pool.shutdown(); server->stop(); }); LOG(INFO) << "Starting API service..."; master_server_routes(); int ret_code = server->run(&replication_state); // we are out of the event loop here LOG(INFO) << "Typesense API service has quit."; quit_raft_service = true; // we set this once again in case API thread crashes instead of a signal raft_thread.join(); LOG(INFO) << "Deleting batch indexer"; delete batch_indexer; LOG(INFO) << "CURL clean up"; curl_global_cleanup(); LOG(INFO) << "Deleting server"; delete server; LOG(INFO) << "CollectionManager dispose, this might take some time..."; // We have to delete the models here, before CUDA driver is unloaded. VQModelManager::get_instance().delete_all_models(); CollectionManager::get_instance().dispose(); delete analytics_store; LOG(INFO) << "Bye."; return ret_code; }
25,655
C++
.cpp
467
46.321199
160
0.630068
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,800
array_base.cpp
typesense_typesense/src/array_base.cpp
#include "array_base.h" uint32_t* array_base::uncompress(uint32_t len) const { uint32_t actual_len = std::max(len, length); uint32_t *out = new uint32_t[actual_len]; for_uncompress(in, out, length); return out; } uint32_t array_base::getSizeInBytes() { return size_bytes; } uint32_t array_base::getLength() const { return length; } uint32_t array_base::getMin() const { return min; } uint32_t array_base::getMax() const { return max; }
473
C++
.cpp
19
21.947368
54
0.690423
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,801
art.cpp
typesense_typesense/src/art.cpp
#include <stdlib.h> #if defined(__x86_64__) #include <emmintrin.h> #elif defined(__aarch64__) #include <sse2neon.h> #endif #include <string.h> #include <stdio.h> #include <assert.h> #include <art.h> #include <functional> #include <chrono> #include <algorithm> #include <iostream> #include <limits> #include <queue> #include <list> #include <stdint.h> #include <posting.h> #include <or_iterator.h> #include "art.h" #include "logger.h" #include "array_utils.h" #include "filter_result_iterator.h" /** * Macros to manipulate pointer tags */ #define IS_LEAF(x) (((uintptr_t)x & 1)) #define SET_LEAF(x) ((void*)((uintptr_t)x | 1)) #define LEAF_RAW(x) ((void*)((uintptr_t)x & ~1)) #define MIN3(a, b, c) ((a) < (b) ? ((a) < (c) ? (a) : (c)) : ((b) < (c) ? (b) : (c))) #ifdef IGNORE_PRINTF #define printf(fmt, ...) (0) #endif #define microseconds std::chrono::duration_cast<std::chrono::microseconds> #define USE_FREQUENCY_SCORE INT64_MIN enum recurse_progress { RECURSE, ABORT, ITERATE }; static void art_fuzzy_recurse(unsigned char p, unsigned char c, const art_node *n, int depth, const unsigned char *term, const int term_len, const int* irow, const int* jrow, const int min_cost, const int max_cost, const bool prefix, std::vector<const art_node *> &results); void art_int_fuzzy_recurse(art_node *n, int depth, const unsigned char* int_str, int int_str_len, NUM_COMPARATOR comparator, std::vector<const art_leaf *> &results); bool compare_art_leaf_frequency(const art_leaf *a, const art_leaf *b) { return posting_t::num_ids(a->values) > posting_t::num_ids(b->values); } bool compare_art_leaf_score(const art_leaf *a, const art_leaf *b) { return a->max_score > b->max_score; } bool compare_art_node_frequency(const art_node *a, const art_node *b) { uint32_t a_value = 0, b_value = 0; if(IS_LEAF(a)) { art_leaf* al = (art_leaf *) LEAF_RAW(a); a_value = posting_t::num_ids(al->values); } if(IS_LEAF(b)) { art_leaf* bl = (art_leaf *) LEAF_RAW(b); b_value = posting_t::num_ids(bl->values); } return a_value 
> b_value; } bool compare_art_node_score(const art_node* a, const art_node* b) { int64_t a_value = std::numeric_limits<int64_t>::min(), b_value = std::numeric_limits<int64_t>::min(); if(IS_LEAF(a)) { art_leaf* al = (art_leaf *) LEAF_RAW(a); a_value = al->max_score; } else { a_value = a->max_score; } if(IS_LEAF(b)) { art_leaf* bl = (art_leaf *) LEAF_RAW(b); b_value = bl->max_score; } else { b_value = b->max_score; } return a_value > b_value; } bool compare_art_node_frequency_pq(const art_node *a, const art_node *b) { return !compare_art_node_frequency(a, b); } bool compare_art_node_score_pq(const art_node* a, const art_node* b) { return !compare_art_node_score(a, b); } /** * Allocates a node of the given type, * initializes to zero and sets the type. */ static art_node* alloc_node(uint8_t type) { art_node* n; switch (type) { case NODE4: n = (art_node *) calloc(1, sizeof(art_node4)); break; case NODE16: n = (art_node *) calloc(1, sizeof(art_node16)); break; case NODE48: n = (art_node *) calloc(1, sizeof(art_node48)); break; case NODE256: n = (art_node *) calloc(1, sizeof(art_node256)); break; default: abort(); } n->type = type; n->max_score = 0; return n; } /** * Initializes an ART tree * @return 0 on success. 
*/ int art_tree_init(art_tree *t) { t->root = NULL; t->size = 0; return 0; } // Recursively destroys the tree static void destroy_node(art_node *n) { // Break if null if (!n) return; // Special case leafs if (IS_LEAF(n)) { art_leaf *leaf = (art_leaf *) LEAF_RAW(n); posting_t::destroy_list(leaf->values); free(leaf); return; } // Handle each node type int i; union { art_node4 *p1; art_node16 *p2; art_node48 *p3; art_node256 *p4; } p; switch (n->type) { case NODE4: p.p1 = (art_node4*)n; for (i=0;i<n->num_children;i++) { destroy_node(p.p1->children[i]); } break; case NODE16: p.p2 = (art_node16*)n; for (i=0;i<n->num_children;i++) { destroy_node(p.p2->children[i]); } break; case NODE48: p.p3 = (art_node48*)n; for (i=0;i<48;i++) { destroy_node(p.p3->children[i]); } break; case NODE256: p.p4 = (art_node256*)n; for (i=0;i<256;i++) { if (p.p4->children[i]) destroy_node(p.p4->children[i]); } break; default: abort(); } // Free ourself on the way up free(n); } /** * Destroys an ART tree * @return 0 on success. */ int art_tree_destroy(art_tree *t) { destroy_node(t->root); return 0; } /** * Returns the size of the ART tree. 
*/ #ifndef BROKEN_GCC_C99_INLINE extern inline uint64_t art_size(art_tree *t); void compare_and_match_leaf(const unsigned char *int_str, int int_str_len, const NUM_COMPARATOR &comparator, std::vector<const art_leaf *> &results, const art_leaf *l); #endif static art_node** find_child(art_node *n, unsigned char c) { int i, mask, bitfield; union { art_node4 *p1; art_node16 *p2; art_node48 *p3; art_node256 *p4; } p; switch (n->type) { case NODE4: p.p1 = (art_node4*)n; for (i=0;i < n->num_children; i++) { if (p.p1->keys[i] == c) return &p.p1->children[i]; } break; { __m128i cmp; case NODE16: p.p2 = (art_node16*)n; // Compare the key to all 16 stored keys cmp = _mm_cmpeq_epi8(_mm_set1_epi8(c), _mm_loadu_si128((__m128i*)p.p2->keys)); // Use a mask to ignore children that don't exist mask = (1 << n->num_children) - 1; bitfield = _mm_movemask_epi8(cmp) & mask; /* * If we have a match (any bit set) then we can * return the pointer match using ctz to get * the index. */ if (bitfield) return &p.p2->children[__builtin_ctz(bitfield)]; break; } case NODE48: p.p3 = (art_node48*)n; i = p.p3->keys[c]; if (i) return &p.p3->children[i-1]; break; case NODE256: p.p4 = (art_node256*)n; if (p.p4->children[c]) return &p.p4->children[c]; break; default: abort(); } return NULL; } // Simple inlined if static inline int min(int a, int b) { return (a < b) ? a : b; } /** * Returns the number of prefix characters shared between * the key and node. */ static int check_prefix(const art_node *n, const unsigned char *key, int key_len, int depth) { int max_cmp = min(min(n->partial_len, MAX_PREFIX_LEN), key_len - depth); int idx; for (idx=0; idx < max_cmp; idx++) { if (n->partial[idx] != key[depth+idx]) return idx; } return idx; } /** * Checks if a leaf matches * @return 0 on success. 
*/ static int leaf_matches(const art_leaf *n, const unsigned char *key, int key_len, int depth) { (void)depth; // Fail if the key lengths are different if (n->key_len != (uint32_t)key_len) return 1; // Compare the keys starting at the depth return memcmp(n->key, key, key_len); } /** * Searches for a value in the ART tree * @arg t The tree * @arg key The key * @arg key_len The length of the key * @return NULL if the item was not found, otherwise * the value pointer is returned. */ void* art_search(const art_tree *t, const unsigned char *key, int key_len) { art_node **child; art_node *n = t->root; int prefix_len, depth = 0; while (n) { // Might be a leaf if (IS_LEAF(n)) { n = (art_node *) LEAF_RAW(n); // Check if the expanded path matches if (!leaf_matches((art_leaf*)n, key, key_len, depth)) { return ((art_leaf*)n); } return NULL; } // Bail if the prefix does not match if (n->partial_len) { prefix_len = check_prefix(n, key, key_len, depth); if (prefix_len != min(MAX_PREFIX_LEN, n->partial_len)) { return NULL; } depth = depth + n->partial_len; if(depth >= key_len) { return NULL; } } assert(depth < key_len); // Recursively search child = find_child(n, key[depth]); n = (child) ? 
*child : NULL; depth++; } return NULL; } // Find the minimum leaf under a node static art_leaf* minimum(const art_node *n) { // Handle base cases if (!n) return NULL; if (IS_LEAF(n)) return (art_leaf *) LEAF_RAW(n); int idx; switch (n->type) { case NODE4: return minimum(((art_node4*)n)->children[0]); case NODE16: return minimum(((art_node16*)n)->children[0]); case NODE48: idx=0; while (!((art_node48*)n)->keys[idx]) idx++; idx = ((art_node48*)n)->keys[idx] - 1; return minimum(((art_node48*)n)->children[idx]); case NODE256: idx=0; while (!((art_node256*)n)->children[idx]) idx++; return minimum(((art_node256*)n)->children[idx]); default: abort(); } } // Find the maximum leaf under a node static art_leaf* maximum(const art_node *n) { // Handle base cases if (!n) return NULL; if (IS_LEAF(n)) return (art_leaf *) LEAF_RAW(n); int idx; switch (n->type) { case NODE4: return maximum(((art_node4*)n)->children[n->num_children-1]); case NODE16: return maximum(((art_node16*)n)->children[n->num_children-1]); case NODE48: idx=255; while (!((art_node48*)n)->keys[idx]) idx--; idx = ((art_node48*)n)->keys[idx] - 1; return maximum(((art_node48*)n)->children[idx]); case NODE256: idx=255; while (!((art_node256*)n)->children[idx]) idx--; return maximum(((art_node256*)n)->children[idx]); default: abort(); } } /** * Returns the minimum valued leaf */ art_leaf* art_minimum(art_tree *t) { return minimum((art_node*)t->root); } /** * Returns the maximum valued leaf */ art_leaf* art_maximum(art_tree *t) { return maximum((art_node*)t->root); } static void add_document_to_leaf(art_document *document, art_leaf *leaf) { leaf->max_score = MAX(leaf->max_score, document->score); posting_t::upsert(leaf->values, document->id, document->offsets); if(document->score == USE_FREQUENCY_SCORE) { leaf->max_score = posting_t::num_ids(leaf->values); } } static art_leaf* make_leaf(const unsigned char *key, uint32_t key_len, art_document *document) { art_leaf *l = (art_leaf *) malloc(sizeof(art_leaf) + key_len); 
l->key_len = key_len; l->max_score = document->score; uint32_t ids[1] = {document->id}; uint32_t offset_index[1] = {0}; if((2 + document->offsets.size()) <= posting_t::COMPACT_LIST_THRESHOLD_LENGTH) { compact_posting_list_t* list = compact_posting_list_t::create(1, ids, offset_index, document->offsets.size(), &document->offsets[0]); l->values = SET_COMPACT_POSTING(list); } else { posting_list_t* pl = new posting_list_t(posting_t::MAX_BLOCK_ELEMENTS); pl->upsert(document->id, document->offsets); l->values = pl; } memcpy(l->key, key, key_len); add_document_to_leaf(document, l); return l; } static uint32_t longest_common_prefix(art_leaf *l1, art_leaf *l2, int depth) { int max_cmp = min(l1->key_len, l2->key_len) - depth; int idx; for (idx=0; idx < max_cmp; idx++) { if (l1->key[depth+idx] != l2->key[depth+idx]) return idx; } return idx; } static void copy_header(art_node *dest, art_node *src) { dest->num_children = src->num_children; dest->partial_len = src->partial_len; dest->max_score = src->max_score; memcpy(dest->partial, src->partial, min(MAX_PREFIX_LEN, src->partial_len)); } static void add_child256(art_node256 *n, art_node **ref, unsigned char c, void *child) { (void)ref; n->n.num_children++; n->children[c] = (art_node *) child; n->n.max_score = MAX(n->n.max_score, ((art_leaf *) LEAF_RAW(child))->max_score); } static void add_child48(art_node48 *n, art_node **ref, unsigned char c, void *child) { if (n->n.num_children < 48) { int pos = 0; while (n->children[pos]) pos++; n->children[pos] = (art_node *) child; n->keys[c] = pos + 1; n->n.num_children++; n->n.max_score = MAX(n->n.max_score, ((art_leaf *) LEAF_RAW(child))->max_score); } else { art_node256 *new_n = (art_node256*)alloc_node(NODE256); for (int i=0;i<256;i++) { if (n->keys[i]) { new_n->children[i] = n->children[n->keys[i] - 1]; } } copy_header((art_node*)new_n, (art_node*)n); *ref = (art_node*)new_n; free(n); add_child256(new_n, ref, c, child); } } static void add_child16(art_node16 *n, art_node **ref, 
unsigned char c, void *child) { if (n->n.num_children < 16) { __m128i cmp; // Compare the key to all 16 stored keys cmp = _mm_cmplt_epi8(_mm_set1_epi8(c), _mm_loadu_si128((__m128i*)n->keys)); // Use a mask to ignore children that don't exist unsigned mask = (1 << n->n.num_children) - 1; unsigned bitfield = _mm_movemask_epi8(cmp) & mask; // Check if less than any unsigned idx; if (bitfield) { idx = __builtin_ctz(bitfield); memmove(n->keys+idx+1,n->keys+idx,n->n.num_children-idx); memmove(n->children+idx+1,n->children+idx, (n->n.num_children-idx)*sizeof(void*)); } else idx = n->n.num_children; // Set the child n->keys[idx] = c; n->children[idx] = (art_node *) child; n->n.num_children++; n->n.max_score = MAX(n->n.max_score, ((art_leaf *) LEAF_RAW(child))->max_score); } else { art_node48 *new_n = (art_node48*)alloc_node(NODE48); // Copy the child pointers and populate the key map memcpy(new_n->children, n->children, sizeof(void*)*n->n.num_children); for (int i=0;i<n->n.num_children;i++) { new_n->keys[n->keys[i]] = i + 1; } copy_header((art_node*)new_n, (art_node*)n); *ref = (art_node*)new_n; free(n); add_child48(new_n, ref, c, child); } } static void add_child4(art_node4 *n, art_node **ref, unsigned char c, void *child) { if (n->n.num_children < 4) { int idx; for (idx=0; idx < n->n.num_children; idx++) { if (c < n->keys[idx]) break; } // Shift to make room memmove(n->keys+idx+1, n->keys+idx, n->n.num_children - idx); memmove(n->children+idx+1, n->children+idx, (n->n.num_children - idx)*sizeof(void*)); n->keys[idx] = c; n->children[idx] = (art_node *) child; n->n.num_children++; n->n.max_score = MAX(n->n.max_score, ((art_leaf *) LEAF_RAW(child))->max_score); } else { art_node16 *new_n = (art_node16*)alloc_node(NODE16); // Copy the child pointers and the key map memcpy(new_n->children, n->children, sizeof(void*)*n->n.num_children); memcpy(new_n->keys, n->keys, sizeof(unsigned char)*n->n.num_children); copy_header((art_node*)new_n, (art_node*)n); *ref = (art_node*)new_n; 
free(n); add_child16(new_n, ref, c, child); } } static void add_child(art_node *n, art_node **ref, unsigned char c, void *child) { switch (n->type) { case NODE4: return add_child4((art_node4*)n, ref, c, child); case NODE16: return add_child16((art_node16*)n, ref, c, child); case NODE48: return add_child48((art_node48*)n, ref, c, child); case NODE256: return add_child256((art_node256*)n, ref, c, child); default: abort(); } } /** * Calculates the index at which the prefixes mismatch */ static int prefix_mismatch(const art_node *n, const unsigned char *key, int key_len, int depth) { int max_cmp = min(min(MAX_PREFIX_LEN, n->partial_len), key_len - depth); int idx; for (idx=0; idx < max_cmp; idx++) { if (n->partial[idx] != key[depth+idx]) return idx; } // If the prefix is short we can avoid finding a leaf if (n->partial_len > MAX_PREFIX_LEN) { // Prefix is longer than what we've checked, find a leaf art_leaf *l = minimum(n); max_cmp = min(l->key_len, key_len)- depth; for (; idx < max_cmp; idx++) { if (l->key[idx+depth] != key[depth+idx]) return idx; } } return idx; } static void* recursive_insert(art_node* n, art_node** ref, const unsigned char* key, uint32_t key_len, const int64_t docs_max_score, std::vector<art_document>& documents, int depth, std::list<art_node*>& path, int* old) { // If we are at a NULL node, inject a leaf if (!n) { art_leaf* new_leaf = make_leaf(key, key_len, &documents[0]); for(size_t i = 1; i < documents.size(); i++) { add_document_to_leaf(&documents[i], new_leaf); } *ref = (art_node*)SET_LEAF(new_leaf); return NULL; } // If we are at a leaf, we need to replace it with a node if (IS_LEAF(n)) { art_leaf *l = (art_leaf *) LEAF_RAW(n); // Check if we are updating an existing value if (!leaf_matches(l, key, key_len, depth)) { *old = 1; for(size_t i = 0; i < documents.size(); i++) { add_document_to_leaf(&documents[i], l); } return l->values; } // New value, we must split the leaf into a node4 art_node4 *new_n = (art_node4*)alloc_node(NODE4); // Create 
a new leaf art_leaf *l2 = make_leaf(key, key_len, &documents[0]); uint32_t longest_prefix = longest_common_prefix(l, l2, depth); new_n->n.partial_len = longest_prefix; memcpy(new_n->n.partial, key+depth, min(MAX_PREFIX_LEN, longest_prefix)); for(size_t i = 1; i < documents.size(); i++) { add_document_to_leaf(&documents[i], l2); } // Add the leafs to the new node4 *ref = (art_node*)new_n; add_child4(new_n, ref, l->key[depth+longest_prefix], SET_LEAF(l)); add_child4(new_n, ref, l2->key[depth+longest_prefix], SET_LEAF(l2)); return NULL; } if(docs_max_score != USE_FREQUENCY_SCORE) { n->max_score = MAX(n->max_score, docs_max_score); } // Check if given node has a prefix if (n->partial_len) { // Determine if the prefixes differ, since we need to split int prefix_diff = prefix_mismatch(n, key, key_len, depth); if ((uint32_t)prefix_diff >= n->partial_len) { depth += n->partial_len; goto RECURSE_SEARCH; } // Create a new node art_node4 *new_n = (art_node4*)alloc_node(NODE4); *ref = (art_node*)new_n; new_n->n.partial_len = prefix_diff; memcpy(new_n->n.partial, n->partial, min(MAX_PREFIX_LEN, prefix_diff)); // Adjust the prefix of the old node if (n->partial_len <= MAX_PREFIX_LEN) { add_child4(new_n, ref, n->partial[prefix_diff], n); n->partial_len -= (prefix_diff+1); memmove(n->partial, n->partial+prefix_diff+1, min(MAX_PREFIX_LEN, n->partial_len)); } else { n->partial_len -= (prefix_diff+1); art_leaf *l = minimum(n); add_child4(new_n, ref, l->key[depth+prefix_diff], n); memcpy(n->partial, l->key+depth+prefix_diff+1, min(MAX_PREFIX_LEN, n->partial_len)); } // Insert the new leaf art_leaf *l = make_leaf(key, key_len, &documents[0]); for(size_t i = 1; i < documents.size(); i++) { add_document_to_leaf(&documents[i], l); } add_child4(new_n, ref, key[depth+prefix_diff], SET_LEAF(l)); path.push_back(*ref); return NULL; } RECURSE_SEARCH:; // Find a child to recurse to art_node **child = find_child(n, key[depth]); if (child) { return recursive_insert(*child, child, key, key_len, 
docs_max_score, documents, depth + 1, path, old); } // No child, node goes within us art_leaf *l = make_leaf(key, key_len, &documents[0]); for(size_t i = 1; i < documents.size(); i++) { add_document_to_leaf(&documents[i], l); } add_child(n, ref, key[depth], SET_LEAF(l)); path.push_back(*ref); return NULL; } /** * Inserts a new value into the ART tree * @arg t The tree * @arg key The key * @arg key_len The length of the key * @arg value Opaque value. * @return NULL if the item was newly inserted, otherwise * the old value pointer is returned. */ void* art_insert(art_tree *t, const unsigned char *key, int key_len, art_document* document) { std::vector<art_document> documents = {*document}; return art_inserts(t, key, key_len, document->score, documents); } void* art_inserts(art_tree *t, const unsigned char *key, int key_len, const int64_t docs_max_score, std::vector<art_document>& documents) { int old_val = 0; std::list<art_node*> path; bool frequency_based_ordering = (docs_max_score == USE_FREQUENCY_SCORE); void *old = recursive_insert(t->root, &t->root, key, key_len, docs_max_score, documents, 0, path, &old_val); if (!old_val) t->size++; if(frequency_based_ordering) { for(art_node* n: path) { n->max_score = MAX(n->max_score, docs_max_score); } } return old; } static void remove_child256(art_node256 *n, art_node **ref, unsigned char c) { n->children[c] = NULL; n->n.num_children--; // Resize to a node48 on underflow, not immediately to prevent // trashing if we sit on the 48/49 boundary if (n->n.num_children == 37) { art_node48 *new_n = (art_node48*)alloc_node(NODE48); *ref = (art_node*)new_n; copy_header((art_node*)new_n, (art_node*)n); int pos = 0; for (int i=0;i<256;i++) { if (n->children[i]) { new_n->children[pos] = n->children[i]; new_n->keys[i] = pos + 1; pos++; } } free(n); } } static void remove_child48(art_node48 *n, art_node **ref, unsigned char c) { int pos = n->keys[c]; n->keys[c] = 0; n->children[pos-1] = NULL; n->n.num_children--; if (n->n.num_children 
== 12) { art_node16 *new_n = (art_node16*)alloc_node(NODE16); *ref = (art_node*)new_n; copy_header((art_node*)new_n, (art_node*)n); int child = 0; for (int i=0;i<256;i++) { pos = n->keys[i]; if (pos) { new_n->keys[child] = i; new_n->children[child] = n->children[pos - 1]; child++; } } free(n); } } static void remove_child16(art_node16 *n, art_node **ref, art_node **l) { int pos = l - n->children; memmove(n->keys+pos, n->keys+pos+1, n->n.num_children - 1 - pos); memmove(n->children+pos, n->children+pos+1, (n->n.num_children - 1 - pos)*sizeof(void*)); n->n.num_children--; if (n->n.num_children == 3) { art_node4 *new_n = (art_node4*)alloc_node(NODE4); *ref = (art_node*)new_n; copy_header((art_node*)new_n, (art_node*)n); memcpy(new_n->keys, n->keys, 4); memcpy(new_n->children, n->children, 4*sizeof(void*)); free(n); } } static void remove_child4(art_node4 *n, art_node **ref, art_node **l) { int pos = l - n->children; memmove(n->keys+pos, n->keys+pos+1, n->n.num_children - 1 - pos); memmove(n->children+pos, n->children+pos+1, (n->n.num_children - 1 - pos)*sizeof(void*)); n->n.num_children--; // Remove nodes with only a single child if (n->n.num_children == 1) { art_node *child = n->children[0]; if (!IS_LEAF(child)) { // Concatenate the prefixes int prefix = n->n.partial_len; if (prefix < MAX_PREFIX_LEN) { n->n.partial[prefix] = n->keys[0]; prefix++; } if (prefix < MAX_PREFIX_LEN) { int sub_prefix = min(child->partial_len, MAX_PREFIX_LEN - prefix); memcpy(n->n.partial+prefix, child->partial, sub_prefix); prefix += sub_prefix; } // Store the prefix in the child memcpy(child->partial, n->n.partial, min(prefix, MAX_PREFIX_LEN)); child->partial_len += n->n.partial_len + 1; } *ref = child; free(n); } } static void remove_child(art_node *n, art_node **ref, unsigned char c, art_node **l) { switch (n->type) { case NODE4: return remove_child4((art_node4*)n, ref, l); case NODE16: return remove_child16((art_node16*)n, ref, l); case NODE48: return remove_child48((art_node48*)n, ref, 
c); case NODE256: return remove_child256((art_node256*)n, ref, c); default: abort(); } } static art_leaf* recursive_delete(art_node *n, art_node **ref, const unsigned char *key, int key_len, int depth) { // Search terminated if (!n) return NULL; // Handle hitting a leaf node if (IS_LEAF(n)) { art_leaf *l = (art_leaf *) LEAF_RAW(n); if (!leaf_matches(l, key, key_len, depth)) { *ref = NULL; return l; } return NULL; } // Bail if the prefix does not match if (n->partial_len) { int prefix_len = check_prefix(n, key, key_len, depth); if (prefix_len != min(MAX_PREFIX_LEN, n->partial_len)) { return NULL; } depth = depth + n->partial_len; if(depth >= key_len) { return NULL; } } assert(depth < key_len); // Find child node art_node **child = find_child(n, key[depth]); if (!child) return NULL; // If the child is leaf, delete from this node if (IS_LEAF(*child)) { art_leaf *l = (art_leaf *) LEAF_RAW(*child); if (!leaf_matches(l, key, key_len, depth)) { remove_child(n, ref, key[depth], child); return l; } return NULL; // Recurse } else { return recursive_delete(*child, child, key, key_len, depth+1); } } /** * Deletes a value from the ART tree * @arg t The tree * @arg key The key * @arg key_len The length of the key * @return NULL if the item was not found, otherwise * the value pointer is returned. 
*/ void* art_delete(art_tree *t, const unsigned char *key, int key_len) { art_leaf *l = recursive_delete(t->root, &t->root, key, key_len, 0); if (l) { t->size--; void *old = l->values; free(l); return old; } return NULL; } /*static uint32_t get_score(art_node* child) { if (IS_LEAF(child)) { art_leaf *l = (art_leaf *) LEAF_RAW(child); return l->values->ids.getLength(); } return child->max_token_count; }*/ const uint32_t* get_allowed_doc_ids(art_tree *t, const std::string& prev_token, const uint32_t* filter_ids, const size_t filter_ids_length, size_t& prev_token_doc_ids_len) { art_leaf* prev_leaf = static_cast<art_leaf*>( art_search(t, reinterpret_cast<const unsigned char*>(prev_token.c_str()), prev_token.size() + 1) ); if(prev_token.empty() || !prev_leaf) { prev_token_doc_ids_len = filter_ids_length; return filter_ids; } std::vector<uint32_t> prev_leaf_ids; posting_t::merge({prev_leaf->values}, prev_leaf_ids); uint32_t* prev_token_doc_ids = nullptr; if(filter_ids_length != 0) { prev_token_doc_ids_len = ArrayUtils::and_scalar(prev_leaf_ids.data(), prev_leaf_ids.size(), filter_ids, filter_ids_length, &prev_token_doc_ids); } else { prev_token_doc_ids_len = prev_leaf_ids.size(); prev_token_doc_ids = new uint32_t[prev_token_doc_ids_len]; std::copy(prev_leaf_ids.begin(), prev_leaf_ids.end(), prev_token_doc_ids); } return prev_token_doc_ids; } bool validate_and_add_leaf(art_leaf* leaf, const bool last_token, const std::string& prev_token, const uint32_t* allowed_doc_ids, const size_t allowed_doc_ids_len, std::set<std::string>& exclude_leaves, const art_leaf* exact_leaf, std::vector<art_leaf *>& results) { if(leaf == exact_leaf) { return false; } std::string tok(reinterpret_cast<char*>(leaf->key), leaf->key_len - 1); if(exclude_leaves.count(tok) != 0) { return false; } if(allowed_doc_ids_len != 0) { if(!posting_t::contains_atleast_one(leaf->values, allowed_doc_ids, allowed_doc_ids_len)) { return false; } } exclude_leaves.emplace(tok); results.push_back(leaf); return true; } 
bool validate_and_add_leaf(art_leaf* leaf, const std::string& prev_token, const art_leaf* prev_leaf, const art_leaf* exact_leaf, filter_result_iterator_t* const filter_result_iterator, std::set<std::string>& exclude_leaves, std::vector<art_leaf *>& results) { if(leaf == exact_leaf) { return false; } std::string tok(reinterpret_cast<char*>(leaf->key), leaf->key_len - 1); if(exclude_leaves.count(tok) != 0) { return false; } if(prev_token.empty() || !prev_leaf) { if (filter_result_iterator->validity == filter_result_iterator_t::valid && !filter_result_iterator->contains_atleast_one(leaf->values)) { return false; } } else { std::vector<or_iterator_t> or_iterators; std::vector<posting_list_t*> expanded_plists; posting_t::get_or_iterator(const_cast<art_leaf*&>(prev_leaf)->values, or_iterators, expanded_plists); posting_t::get_or_iterator(leaf->values, or_iterators, expanded_plists); auto found = or_iterator_t::contains_atleast_one(or_iterators, result_iter_state_t(nullptr, 0, filter_result_iterator)); for (auto& item: expanded_plists) { delete item; } if (!found) { return false; } } exclude_leaves.emplace(tok); results.push_back(leaf); return true; } int art_topk_iter(const art_node *root, token_ordering token_order, size_t max_results, const art_leaf* exact_leaf, const bool last_token, const std::string& prev_token, const uint32_t* allowed_doc_ids, size_t allowed_doc_ids_len, const art_tree* t, std::set<std::string>& exclude_leaves, std::vector<art_leaf *>& results) { printf("INSIDE art_topk_iter: root->type: %d\n", root->type); std::priority_queue<const art_node *, std::vector<const art_node *>, decltype(&compare_art_node_score_pq)> q(compare_art_node_score_pq); if(token_order == FREQUENCY) { q = std::priority_queue<const art_node *, std::vector<const art_node *>, decltype(&compare_art_node_frequency_pq)>(compare_art_node_frequency_pq); } q.push(root); size_t num_processed = 0; while(!q.empty() && results.size() < max_results*4) { art_node *n = (art_node *) q.top(); 
q.pop(); /*if (IS_LEAF(n)) { art_leaf *l = (art_leaf *) LEAF_RAW(n); LOG(INFO) << "Top node (leaf) score: " << l->max_score; } else { LOG(INFO) << "Top node score: " << n->max_score; }*/ if (!n) continue; if (IS_LEAF(n)) { art_leaf *l = (art_leaf *) LEAF_RAW(n); //LOG(INFO) << "END LEAF SCORE: " << l->max_score; validate_and_add_leaf(l, last_token, prev_token, allowed_doc_ids, allowed_doc_ids_len, exclude_leaves, exact_leaf, results); if (++num_processed % 1024 == 0 && (microseconds( std::chrono::system_clock::now().time_since_epoch()).count() - search_begin_us) > search_stop_us) { search_cutoff = true; break; } continue; } int idx; switch (n->type) { case NODE4: //LOG(INFO) << "NODE4, SCORE: " << n->max_score; for (int i=0; i < n->num_children; i++) { art_node* child = ((art_node4*)n)->children[i]; q.push(child); } break; case NODE16: //LOG(INFO) << "NODE16, SCORE: " << n->max_score; for (int i=0; i < n->num_children; i++) { q.push(((art_node16*)n)->children[i]); } break; case NODE48: //LOG(INFO) << "NODE48, SCORE: " << n->max_score; for (int i=0; i < 256; i++) { idx = ((art_node48*)n)->keys[i]; if (!idx) continue; art_node *child = ((art_node48*)n)->children[idx - 1]; q.push(child); } break; case NODE256: //LOG(INFO) << "NODE256, SCORE: " << n->max_score; for (int i=0; i < 256; i++) { if (!((art_node256*)n)->children[i]) continue; q.push(((art_node256*)n)->children[i]); } break; default: printf("ABORTING BECAUSE OF UNKNOWN NODE TYPE: %d\n", n->type); abort(); } } /*LOG(INFO) << "leaf results.size: " << results.size() << ", filter_ids_length: " << filter_ids_length << ", num_large_lists: " << num_large_lists;*/ printf("OUTSIDE art_topk_iter: results size: %d\n", results.size()); return 0; } int art_topk_iter(const art_node *root, token_ordering token_order, size_t max_results, const art_leaf* exact_leaf, const bool last_token, const std::string& prev_token, filter_result_iterator_t* const filter_result_iterator, const art_tree* t, std::set<std::string>& 
exclude_leaves, std::vector<art_leaf *>& results) { // printf("INSIDE art_topk_iter: root->type: %d\n", root->type); auto prev_leaf = static_cast<art_leaf*>( art_search(t, reinterpret_cast<const unsigned char*>(prev_token.c_str()), prev_token.size() + 1) ); std::priority_queue<const art_node *, std::vector<const art_node *>, decltype(&compare_art_node_score_pq)> q(compare_art_node_score_pq); if(token_order == FREQUENCY) { q = std::priority_queue<const art_node *, std::vector<const art_node *>, decltype(&compare_art_node_frequency_pq)>(compare_art_node_frequency_pq); } q.push(root); size_t num_processed = 0; while(!q.empty() && results.size() < max_results*4) { art_node *n = (art_node *) q.top(); q.pop(); if (!n) continue; if (IS_LEAF(n)) { art_leaf *l = (art_leaf *) LEAF_RAW(n); //LOG(INFO) << "END LEAF SCORE: " << l->max_score; validate_and_add_leaf(l, prev_token, prev_leaf, exact_leaf, filter_result_iterator, exclude_leaves, results); filter_result_iterator->reset(); if (filter_result_iterator->validity == filter_result_iterator_t::timed_out || (++num_processed % 1024 == 0 && (microseconds( std::chrono::system_clock::now().time_since_epoch()).count() - search_begin_us) > search_stop_us)) { search_cutoff = true; break; } continue; } int idx; switch (n->type) { case NODE4: //LOG(INFO) << "NODE4, SCORE: " << n->max_score; for (int i=0; i < n->num_children; i++) { art_node* child = ((art_node4*)n)->children[i]; q.push(child); } break; case NODE16: //LOG(INFO) << "NODE16, SCORE: " << n->max_score; for (int i=0; i < n->num_children; i++) { q.push(((art_node16*)n)->children[i]); } break; case NODE48: //LOG(INFO) << "NODE48, SCORE: " << n->max_score; for (int i=0; i < 256; i++) { idx = ((art_node48*)n)->keys[i]; if (!idx) continue; art_node *child = ((art_node48*)n)->children[idx - 1]; q.push(child); } break; case NODE256: //LOG(INFO) << "NODE256, SCORE: " << n->max_score; for (int i=0; i < 256; i++) { if (!((art_node256*)n)->children[i]) continue; 
q.push(((art_node256*)n)->children[i]); } break; default: printf("ABORTING BECAUSE OF UNKNOWN NODE TYPE: %d\n", n->type); abort(); } } /*LOG(INFO) << "leaf results.size: " << results.size() << ", filter_ids_length: " << filter_ids_length << ", num_large_lists: " << num_large_lists;*/ printf("OUTSIDE art_topk_iter: results size: %d\n", results.size()); return 0; } // Recursively iterates over the tree static int recursive_iter(art_node *n, art_callback cb, void *data) { // Handle base cases if (!n) return 0; if (IS_LEAF(n)) { art_leaf *l = (art_leaf *) LEAF_RAW(n); //printf("REC LEAF len: %d, key: %s\n", l->key_len, l->key); return cb(data, (const unsigned char*)l->key, l->key_len, l->values); } //printf("INTERNAL LEAF children: %d, partial_len: %d, partial: %s\n", n->num_children, n->partial_len, n->partial); int idx, res; switch (n->type) { case NODE4: for (int i=0; i < n->num_children; i++) { //printf("INTERNAL LEAF key[i]: %c\n", ((art_node4*)n)->keys[i]); res = recursive_iter(((art_node4*)n)->children[i], cb, data); if (res) return res; } break; case NODE16: for (int i=0; i < n->num_children; i++) { res = recursive_iter(((art_node16*)n)->children[i], cb, data); if (res) return res; } break; case NODE48: for (int i=0; i < 256; i++) { idx = ((art_node48*)n)->keys[i]; if (!idx) continue; res = recursive_iter(((art_node48*)n)->children[idx-1], cb, data); if (res) return res; } break; case NODE256: for (int i=0; i < 256; i++) { if (!((art_node256*)n)->children[i]) continue; res = recursive_iter(((art_node256*)n)->children[i], cb, data); if (res) return res; } break; default: abort(); } return 0; } /** * Iterates through the entries pairs in the map, * invoking a callback for each. The call back gets a * key, value for each and returns an integer stop value. * If the callback returns non-zero, then the iteration stops. 
* @arg t The tree to iterate over * @arg cb The callback function to invoke * @arg data Opaque handle passed to the callback * @return 0 on success, or the return of the callback. */ int art_iter(art_tree *t, art_callback cb, void *data) { return recursive_iter(t->root, cb, data); } /** * Checks if a leaf prefix matches * @return 0 on success. */ static int leaf_prefix_matches(const art_leaf *n, const unsigned char *prefix, int prefix_len) { // Fail if the key length is too short if (n->key_len < (uint32_t)prefix_len) return 1; // Compare the keys return memcmp(n->key, prefix, prefix_len); } /** * Iterates through the entries pairs in the map, * invoking a callback for each that matches a given prefix. * The call back gets a key, value for each and returns an integer stop value. * If the callback returns non-zero, then the iteration stops. * @arg t The tree to iterate over * @arg prefix The prefix of keys to read * @arg prefix_len The length of the prefix * @arg cb The callback function to invoke * @arg data Opaque handle passed to the callback * @return 0 on success, or the return of the callback. 
*/ int art_iter_prefix(art_tree *t, const unsigned char *key, int key_len, art_callback cb, void *data) { art_node **child; art_node *n = t->root; int prefix_len, depth = 0; while (n) { //printf("partial_len: %d\n", n->num_children); // Might be a leaf if (IS_LEAF(n)) { n = (art_node *) LEAF_RAW(n); printf("RAW LEAF len: %d, children: %d\n", n->partial_len, n->num_children); // Check if the expanded path matches if (!leaf_prefix_matches((art_leaf*)n, key, key_len)) { art_leaf *l = (art_leaf*)n; return cb(data, (const unsigned char*)l->key, l->key_len, l->values); } return 0; } printf("IS_INTERNAL\n"); printf("Prefix len: %d, children: %d, depth: %d, partial: %s\n", n->partial_len, n->num_children, depth, n->partial); // If the depth matches the prefix, we need to handle this node if (depth == key_len) { art_leaf *l = minimum(n); printf("DEPTH LEAF len: %d, key: %s\n", l->key_len, l->key); if (!leaf_prefix_matches(l, key, key_len)) return recursive_iter(n, cb, data); return 0; } // Bail if the prefix does not match if (n->partial_len) { prefix_len = prefix_mismatch(n, key, key_len, depth); // Guard if the mis-match is longer than the MAX_PREFIX_LEN if (prefix_len > n->partial_len) { prefix_len = n->partial_len; } // If there is no match, search is terminated if (!prefix_len) { return 0; } else if (depth + prefix_len == key_len) { // If we've matched the prefix, iterate on this node return recursive_iter(n, cb, data); } else if(depth + n->partial_len >= key_len) { return 0; } // if there is a full match, go deeper depth = depth + n->partial_len; } assert(depth < key_len); // Recursively search child = find_child(n, key[depth]); n = (child) ? 
*child : NULL; depth++; } return 0; } void print_row(const int* row, const int row_len) { for(int i=0; i<=row_len; i++) { printf("%d ", row[i]); } printf("\n"); } static inline void copyIntArray2(const int *src, int *dest, const int len) { for(int t=0; t<len; t++) { dest[t] = src[t]; } } static inline void levenshtein_dist(const int depth, const unsigned char p, const unsigned char c, const unsigned char* term, const int term_len, const int* irow, const int* jrow, int* krow) { krow[0] = jrow[0] + 1; // Calculate levenshtein distance incrementally (term => b, column => j, c => a[i], p => a[i-1], irow => d[i-1]): // https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance#Optimal_string_alignment_distance for(int column=1; column<=term_len; column++) { int cost = (c == term[column-1]) ? 0 : 1; // column-1 used because of zero-based char array int delete_cost = jrow[column] + 1; int insert_cost = krow[column - 1] + 1; int substitution_cost = jrow[column - 1] + cost; krow[column] = min(min(insert_cost, delete_cost), substitution_cost); if(depth > 1 && column > 1 && c == term[column-1-1] && p == term[column-1]) { krow[column] = std::min(krow[column], irow[column-2] + 1); } } } static inline void art_fuzzy_children(unsigned char p, const art_node *n, int depth, const unsigned char *term, const int term_len, const int* irow, const int* jrow, const int min_cost, const int max_cost, const bool prefix, std::vector<const art_node *> &results) { char child_char; art_node* child; switch (n->type) { case NODE4: printf("\nNODE4\n"); for (int i=n->num_children-1; i >= 0; i--) { child_char = ((art_node4*)n)->keys[i]; printf("4!child_char: %c, %d, depth: %d\n", child_char, child_char, depth); child = ((art_node4*)n)->children[i]; art_fuzzy_recurse(p, child_char, child, depth, term, term_len, irow, jrow, min_cost, max_cost, prefix, results); } break; case NODE16: printf("\nNODE16\n"); for (int i=n->num_children-1; i >= 0; i--) { child_char = ((art_node16*)n)->keys[i]; 
printf("16!child_char: %c, depth: %d\n", child_char, depth); child = ((art_node16*)n)->children[i]; art_fuzzy_recurse(p, child_char, child, depth, term, term_len, irow, jrow, min_cost, max_cost, prefix, results); } break; case NODE48: printf("\nNODE48\n"); for (int i=255; i >= 0; i--) { int ix = ((art_node48*)n)->keys[i]; if (!ix) continue; child = ((art_node48*)n)->children[ix - 1]; child_char = (char)i; printf("48!child_char: %c, depth: %d, ix: %d\n", child_char, depth, ix); art_fuzzy_recurse(p, child_char, child, depth, term, term_len, irow, jrow, min_cost, max_cost, prefix, results); } break; case NODE256: printf("\nNODE256\n"); for (int i=255; i >= 0; i--) { if (!((art_node256*)n)->children[i]) continue; child_char = (char) i; printf("256!child_char: %c, depth: %d\n", child_char, depth); child = ((art_node256*)n)->children[i]; art_fuzzy_recurse(p, child_char, child, depth, term, term_len, irow, jrow, min_cost, max_cost, prefix, results); } break; default: abort(); } } static inline void rotate(int &i, int &j, int &k) { int old_i = i; i = j; j = k; k = old_i; } // -1: return without adding, 0 : continue iteration, 1: return after adding static inline int fuzzy_search_state(const bool prefix, int key_index, unsigned char p, unsigned char c, const unsigned char* query, const int query_len, const int* cost_row, int min_cost, int max_cost) { // There are 2 scenarios: // a) key_len < query_len: "pltninum" (query) on "pst" (key) // b) query_len < key_len: "pst" (query) on "pltninum" (key) bool last_key_char = (c == '\0'); int key_len = last_key_char ? 
key_index : key_index + 1; if(last_key_char) { // Last char, so have to return 1 or -1 if(cost_row[query_len] >= min_cost && cost_row[query_len] <= max_cost) { return 1; } // Special case used to match q=strawberries on key=strawberry (query_len > key_len) // but limit to larger keys to prevent eager matches if(key_len > 5 && query_len > key_len && (query_len - key_len) <= max_cost && cost_row[key_len] >= min_cost && cost_row[key_len] <= max_cost-1) { return 1; } return -1; } // `key_len` can't exceed `query_len` since length of `cost_row` is `query_len + 1` int cost = cost_row[std::min(key_len, query_len)]; if(key_len >= query_len && prefix) { // Case b) // For prefix queries // - we can return early if key_len reaches query_len and cost is within bounds. // - might have to iterate past prefix query length to catch trailing typos. if(cost >= min_cost && cost <= max_cost) { return 1; } } /* Terminate the search early or continue iterating on the key? We have to account for the case that `cost` could momentarily exceed max_cost but resolve later. In such cases, we will compare characters in the query with p and/or c to decide. 
*/ if(cost <= max_cost) { return 0; } if(cost == 2 || cost == 3) { /* [1 letter extra] exam ple exZa mple [1 letter missing] exam ple exmp le [1 letter missing + transpose] dacrycystal gia dacrcyystlg ia */ bool letter_more = (key_index+1 < query_len && query[key_index+1] == c); bool letter_less = (key_index > 0 && query[key_index-1] == c); if(letter_more || letter_less) { return 0; } } if(cost == 3 || cost == 4) { /* [2 letter extra] exam ple eTxT ample abbviat ion abbrevi ation */ bool extra_matching_letters = (key_index + 1 < query_len && p == query[key_index + 1] && key_index + 2 < query_len && c == query[key_index + 2]); if(extra_matching_letters) { return 0; } /* [2 letter missing] exam ple expl e */ bool two_letter_less = (key_index > 1 && query[key_index-2] == c); if(two_letter_less) { return 0; } } return -1; } static void art_fuzzy_recurse(unsigned char p, unsigned char c, const art_node *n, int depth, const unsigned char *term, const int term_len, const int* irow, const int* jrow, const int min_cost, const int max_cost, const bool prefix, std::vector<const art_node *> &results) { if (!n) return ; const int columns = term_len+1; int i=0, j=1, k=2; int row0[columns]; int row1[columns]; int row2[columns]; int* rows[3] = {row0, row1, row2}; copyIntArray2(irow, rows[i], columns); copyIntArray2(jrow, rows[j], columns); if(depth == -1) { // root node depth = 0; } else { // check indexed char first bool last_key_char = (c == '\0'); if(!prefix || !last_key_char) { levenshtein_dist(depth, p, c, term, term_len, rows[i], rows[j], rows[k]); rotate(i, j, k); } int action = fuzzy_search_state(prefix, depth, p, c, term, term_len, rows[j], min_cost, max_cost); if(1 == action) { results.push_back(n); return; } if(action == -1) { return; } p = c; depth++; } // check if node is a leaf if(IS_LEAF(n)) { art_leaf *l = (art_leaf *) LEAF_RAW(n); //std::string leaf_str((const char*)l->key, l->key_len-1); //LOG(INFO) << "leaf key: " << leaf_str; /*if(leaf_str == "illustrations") { 
LOG(INFO) << "here"; }*/ // look past term_len to deal with trailing typo, e.g. searching "pltinum" on "platinum" @ max_cost = 1 const int iter_len = std::min(int(l->key_len), term_len + max_cost); if(depth >= iter_len) { // when a preceding partial node completely contains the whole leaf (e.g. "[raspberr]y" on "raspberries") int action = fuzzy_search_state(prefix, depth, '\0', '\0', term, term_len, rows[j], min_cost, max_cost); if(action == 1) { results.push_back(n); } return; } // we will iterate through remaining leaf characters while(depth < iter_len) { c = l->key[depth]; bool last_key_char = (c == '\0'); if(!prefix || !last_key_char) { levenshtein_dist(depth, p, c, term, term_len, rows[i], rows[j], rows[k]); printf("leaf char: %c\n", l->key[depth]); printf("cost: %d, depth: %d, term_len: %d\n", temp_cost, depth, term_len); rotate(i, j, k); } int action = fuzzy_search_state(prefix, depth, p, c, term, term_len, rows[j], min_cost, max_cost); if(action == 1) { results.push_back(n); return; } if(action == -1) { return; } p = c; depth++; } return ; } // now check compressed prefix int partial_len = min(MAX_PREFIX_LEN, n->partial_len); //std::string partial_str(reinterpret_cast<const char *>(n->partial), n->partial_len); for (int idx = 0; idx < partial_len; idx++) { c = n->partial[idx]; levenshtein_dist(depth, p, c, term, term_len, rows[i], rows[j], rows[k]); rotate(i, j, k); int action = fuzzy_search_state(prefix, depth, p, c, term, term_len, rows[j], min_cost, max_cost); if(action == 1) { results.push_back(n); return; } if(action == -1) { return; } p = c; depth++; } // Some intermediate path may have been left out if partial_len is truncated: progress the levenshtein matrix while(partial_len < n->partial_len && depth < term_len) { c = term[depth]; levenshtein_dist(depth, p, c, term, term_len, rows[i], rows[j], rows[k]); rotate(i, j, k); int action = fuzzy_search_state(prefix, depth, p, c, term, term_len, rows[j], min_cost, max_cost); if(action == 1) { 
results.push_back(n); return; } if(action == -1) { return; } p = c; depth++; partial_len++; } art_fuzzy_children(c, n, depth, term, term_len, rows[i], rows[j], min_cost, max_cost, prefix, results); } /** * Returns leaves that match a given string within a fuzzy distance of max_cost. */ int art_fuzzy_search(art_tree *t, const unsigned char *term, const int term_len, const int min_cost, const int max_cost, const size_t max_words, const token_ordering token_order, const bool prefix, bool last_token, const std::string& prev_token, const uint32_t *filter_ids, const size_t filter_ids_length, std::vector<art_leaf *> &results, std::set<std::string>& exclude_leaves) { std::vector<const art_node*> nodes; int irow[term_len + 1]; int jrow[term_len + 1]; for (int i = 0; i <= term_len; i++){ irow[i] = jrow[i] = i; } //auto begin = std::chrono::high_resolution_clock::now(); if(IS_LEAF(t->root)) { art_leaf *l = (art_leaf *) LEAF_RAW(t->root); art_fuzzy_recurse(0, l->key[0], t->root, 0, term, term_len, irow, jrow, min_cost, max_cost, prefix, nodes); } else { if(t->root == nullptr) { return 0; } // send depth as -1 to indicate that this is a root node art_fuzzy_recurse(0, 0, t->root, -1, term, term_len, irow, jrow, min_cost, max_cost, prefix, nodes); } //long long int time_micro = microseconds(std::chrono::high_resolution_clock::now() - begin).count(); //!LOG(INFO) << "Time taken for fuzz: " << time_micro << "us, size of nodes: " << nodes.size(); //auto begin = std::chrono::high_resolution_clock::now(); size_t key_len = prefix ? 
term_len + 1 : term_len; art_leaf* exact_leaf = (art_leaf *) art_search(t, term, key_len); //LOG(INFO) << "exact_leaf: " << exact_leaf << ", term: " << term << ", term_len: " << term_len; // documents that contain the previous token and/or filter ids size_t allowed_doc_ids_len = 0; const uint32_t* allowed_doc_ids = get_allowed_doc_ids(t, prev_token, filter_ids, filter_ids_length, allowed_doc_ids_len); for(auto node: nodes) { art_topk_iter(node, token_order, max_words, exact_leaf, last_token, prev_token, allowed_doc_ids, allowed_doc_ids_len, t, exclude_leaves, results); } if(token_order == FREQUENCY) { std::sort(results.begin(), results.end(), compare_art_leaf_frequency); } else { std::sort(results.begin(), results.end(), compare_art_leaf_score); } if(exact_leaf && min_cost == 0) { std::string tok(reinterpret_cast<char*>(exact_leaf->key), exact_leaf->key_len - 1); if(exclude_leaves.count(tok) == 0) { results.insert(results.begin(), exact_leaf); exclude_leaves.emplace(tok); } } if(results.size() > max_words) { results.resize(max_words); } /*auto time_micro = microseconds(std::chrono::high_resolution_clock::now() - begin).count(); if(time_micro > 1000) { LOG(INFO) << "Time taken for art_topk_iter: " << time_micro << "us, size of nodes: " << nodes.size() << ", filter_ids_length: " << filter_ids_length; }*/ if(allowed_doc_ids != filter_ids) { delete [] allowed_doc_ids; } return 0; } int art_fuzzy_search_i(art_tree *t, const unsigned char *term, const int term_len, const int min_cost, const int max_cost, const size_t max_words, const token_ordering token_order, const bool prefix, bool last_token, const std::string& prev_token, filter_result_iterator_t* const filter_result_iterator, std::vector<art_leaf *> &results, std::set<std::string>& exclude_leaves) { std::vector<const art_node*> nodes; int irow[term_len + 1]; int jrow[term_len + 1]; for (int i = 0; i <= term_len; i++){ irow[i] = jrow[i] = i; } //auto begin = std::chrono::high_resolution_clock::now(); 
if(IS_LEAF(t->root)) { art_leaf *l = (art_leaf *) LEAF_RAW(t->root); art_fuzzy_recurse(0, l->key[0], t->root, 0, term, term_len, irow, jrow, min_cost, max_cost, prefix, nodes); } else { if(t->root == nullptr) { return 0; } // send depth as -1 to indicate that this is a root node art_fuzzy_recurse(0, 0, t->root, -1, term, term_len, irow, jrow, min_cost, max_cost, prefix, nodes); } //long long int time_micro = microseconds(std::chrono::high_resolution_clock::now() - begin).count(); //!LOG(INFO) << "Time taken for fuzz: " << time_micro << "us, size of nodes: " << nodes.size(); //auto begin = std::chrono::high_resolution_clock::now(); size_t key_len = prefix ? term_len + 1 : term_len; art_leaf* exact_leaf = (art_leaf *) art_search(t, term, key_len); //LOG(INFO) << "exact_leaf: " << exact_leaf << ", term: " << term << ", term_len: " << term_len; for(auto node: nodes) { art_topk_iter(node, token_order, max_words, exact_leaf, last_token, prev_token, filter_result_iterator, t, exclude_leaves, results); } if(token_order == FREQUENCY) { std::sort(results.begin(), results.end(), compare_art_leaf_frequency); } else { std::sort(results.begin(), results.end(), compare_art_leaf_score); } if(exact_leaf && min_cost == 0) { std::string tok(reinterpret_cast<char*>(exact_leaf->key), exact_leaf->key_len - 1); if(exclude_leaves.count(tok) == 0) { results.insert(results.begin(), exact_leaf); exclude_leaves.emplace(tok); } } if(results.size() > max_words) { results.resize(max_words); } /*auto time_micro = microseconds(std::chrono::high_resolution_clock::now() - begin).count(); if(time_micro > 1000) { LOG(INFO) << "Time taken for art_topk_iter: " << time_micro << "us, size of nodes: " << nodes.size() << ", filter_ids_length: " << filter_result_iterator.approx_filter_ids_length; }*/ return 0; } void encode_int32(int32_t n, unsigned char *chars) { unsigned char symbols[16] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; unsigned char bytes[4]; bytes[0] = (unsigned char) ((n >> 
24) & 0xFF); bytes[1] = (unsigned char) ((n >> 16) & 0xFF); bytes[2] = (unsigned char) ((n >> 8) & 0xFF); bytes[3] = (unsigned char) (n & 0xFF); for(uint32_t i = 0; i < 4; i++) { chars[2*i] = symbols[((bytes[i] >> 4) & 0x0F)]; chars[2*i+1] = symbols[(bytes[i] & 0x0F)]; } } void encode_int64(int64_t n, unsigned char *chars) { chars[0] = (unsigned char) ((n >> 56) & 0xFF); chars[1] = (unsigned char) ((n >> 48) & 0xFF); chars[2] = (unsigned char) ((n >> 40) & 0xFF); chars[3] = (unsigned char) ((n >> 32) & 0xFF); chars[4] = (unsigned char) ((n >> 24) & 0xFF); chars[5] = (unsigned char) ((n >> 16) & 0xFF); chars[6] = (unsigned char) ((n >> 8) & 0xFF); chars[7] = (unsigned char) (n & 0xFF); } // See: https://github.com/apache/hbase/blob/master/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java#L1372 void encode_float(float n, unsigned char *chars) { int32_t i; memcpy(&i, &n, sizeof(int32_t)); i ^= ((i >> (std::numeric_limits<int32_t>::digits - 1)) | INT32_MIN); encode_int32(i, chars); } // Implements ==, <= and >= recurse_progress matches(unsigned char a, unsigned char b, NUM_COMPARATOR comparator) { switch(comparator) { case LESS_THAN: case LESS_THAN_EQUALS: if (a == b) return RECURSE; else if(a < b) return ITERATE; return ABORT; case EQUALS: if(a == b) return RECURSE; return ABORT; case GREATER_THAN: case GREATER_THAN_EQUALS: if (a == b) return RECURSE; else if(a > b) return ITERATE; return ABORT; default: abort(); } } static void art_iter(const art_node *n, const unsigned char* int_str, int int_str_len, NUM_COMPARATOR comparator, std::vector<const art_leaf *> &results) { // Handle base cases if (!n) return ; if (IS_LEAF(n)) { art_leaf *l = (art_leaf *) LEAF_RAW(n); compare_and_match_leaf(int_str, int_str_len, comparator, results, l); return ; } int idx; switch (n->type) { case NODE4: for (int i=0; i < n->num_children; i++) { art_iter(((art_node4 *) n)->children[i], int_str, int_str_len, comparator, results); } break; case NODE16: for (int i=0; i 
< n->num_children; i++) { art_iter(((art_node16 *) n)->children[i], int_str, int_str_len, comparator, results); } break; case NODE48: for (int i=0; i < 256; i++) { idx = ((art_node48*)n)->keys[i]; if (!idx) continue; art_iter(((art_node48 *) n)->children[idx - 1], int_str, int_str_len, comparator, results); } break; case NODE256: for (int i=0; i < 256; i++) { if (!((art_node256*)n)->children[i]) continue; art_iter(((art_node256 *) n)->children[i], int_str, int_str_len, comparator, results); } break; default: abort(); } return ; } static inline void art_int_fuzzy_children(const art_node *n, int depth, const unsigned char* int_str, int int_str_len, NUM_COMPARATOR comparator, std::vector<const art_leaf *> &results) { unsigned char child_char; art_node* child; switch (n->type) { case NODE4: printf("\nNODE4\n"); for (int i=n->num_children-1; i >= 0; i--) { child_char = ((art_node4*)n)->keys[i]; printf("4!child_char: %c, %d, depth: %d\n", child_char, child_char, depth); child = ((art_node4*)n)->children[i]; recurse_progress progress = matches(child_char, int_str[depth], comparator); if(progress == RECURSE) { art_int_fuzzy_recurse(child, depth+1, int_str, int_str_len, comparator, results); } else if(progress == ITERATE) { art_iter(child, int_str, int_str_len, comparator, results); } } break; case NODE16: printf("\nNODE16\n"); for (int i=n->num_children-1; i >= 0; i--) { child_char = ((art_node16*)n)->keys[i]; printf("16!child_char: %c, depth: %d\n", child_char, depth); child = ((art_node16*)n)->children[i]; recurse_progress progress = matches(child_char, int_str[depth], comparator); if(progress == RECURSE) { art_int_fuzzy_recurse(child, depth+1, int_str, int_str_len, comparator, results); } else if(progress == ITERATE) { art_iter(child, int_str, int_str_len, comparator, results); } } break; case NODE48: printf("\nNODE48\n"); for (int i=255; i >= 0; i--) { int ix = ((art_node48*)n)->keys[i]; if (!ix) continue; child = ((art_node48*)n)->children[ix - 1]; child_char = 
(unsigned char)i; printf("48!child_char: %c, depth: %d, ix: %d\n", child_char, depth, ix); recurse_progress progress = matches(child_char, int_str[depth], comparator); if(progress == RECURSE) { art_int_fuzzy_recurse(child, depth+1, int_str, int_str_len, comparator, results); } else if(progress == ITERATE) { art_iter(child, int_str, int_str_len, comparator, results); } } break; case NODE256: printf("\nNODE256\n"); for (int i=255; i >= 0; i--) { if (!((art_node256*)n)->children[i]) continue; child_char = (unsigned char) i; printf("256!child_char: %c, depth: %d\n", child_char, depth); child = ((art_node256*)n)->children[i]; recurse_progress progress = matches(child_char, int_str[depth], comparator); if(progress == RECURSE) { art_int_fuzzy_recurse(child, depth+1, int_str, int_str_len, comparator, results); } else if(progress == ITERATE) { art_iter(child, int_str, int_str_len, comparator, results); } } break; default: abort(); } } void art_int_fuzzy_recurse(art_node *n, int depth, const unsigned char* int_str, int int_str_len, NUM_COMPARATOR comparator, std::vector<const art_leaf*> &results) { if (!n) return ; if(IS_LEAF(n)) { art_leaf *l = (art_leaf *) LEAF_RAW(n); while(depth < int_str_len) { unsigned char c = l->key[depth]; recurse_progress progress = matches(c, int_str[depth], comparator); if(progress == ABORT) { return; } if(progress == ITERATE) { break; } depth++; } compare_and_match_leaf(int_str, int_str_len, comparator, results, l); return ; } const int partial_len = min(MAX_PREFIX_LEN, n->partial_len); const int end_index = min(partial_len, int_str_len); printf("\npartial_len: %d", partial_len); for(int idx=0; idx<end_index; idx++) { unsigned char c = n->partial[idx]; recurse_progress progress = matches(c, int_str[depth+idx], comparator); if(progress == ABORT) { return; } if(progress == ITERATE) { return art_iter(n, int_str, int_str_len, comparator, results); } } depth += n->partial_len; art_int_fuzzy_children(n, depth, int_str, int_str_len, comparator, 
results); } void compare_and_match_leaf(const unsigned char *int_str, int int_str_len, const NUM_COMPARATOR &comparator, std::vector<const art_leaf *> &results, const art_leaf *l) { if(comparator == LESS_THAN || comparator == GREATER_THAN) { for(uint32_t i = 0; i < l->key_len; i++) { if(int_str[i] != l->key[i]) { results.push_back(l); return ; } } } else { results.push_back(l); } } int art_int32_search(art_tree *t, int32_t value, NUM_COMPARATOR comparator, std::vector<const art_leaf *> &results) { unsigned char chars[8]; encode_int32(value, chars); art_int_fuzzy_recurse(t->root, 0, chars, 8, comparator, results); return 0; } int art_int64_search(art_tree *t, int64_t value, NUM_COMPARATOR comparator, std::vector<const art_leaf *> &results) { unsigned char chars[8]; encode_int64(value, chars); art_int_fuzzy_recurse(t->root, 0, chars, 8, comparator, results); return 0; } int art_float_search(art_tree *t, float value, NUM_COMPARATOR comparator, std::vector<const art_leaf *> &results) { unsigned char chars[8]; encode_float(value, chars); art_int_fuzzy_recurse(t->root, 0, chars, 8, comparator, results); return 0; }
71,866
C++
.cpp
1,808
30.279867
131
0.538441
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,802
app_metrics.cpp
typesense_typesense/src/app_metrics.cpp
#include "app_metrics.h" #include "core_api.h" void AppMetrics::increment_write_metrics(uint64_t route_hash, uint64_t duration) { if(is_doc_import_route(route_hash)) { AppMetrics::get_instance().increment_duration(AppMetrics::IMPORT_LABEL, duration); AppMetrics::get_instance().increment_count(AppMetrics::IMPORT_LABEL, 1); } else if(is_doc_write_route(route_hash)) { AppMetrics::get_instance().increment_duration(AppMetrics::DOC_WRITE_LABEL, duration); AppMetrics::get_instance().increment_count(AppMetrics::DOC_WRITE_LABEL, 1); } else if(is_doc_del_route(route_hash)) { AppMetrics::get_instance().increment_duration(AppMetrics::DOC_DELETE_LABEL, duration); AppMetrics::get_instance().increment_count(AppMetrics::DOC_DELETE_LABEL, 1); } } void AppMetrics::get(const std::string& rps_key, const std::string& latency_key, nlohmann::json& result) const { std::shared_lock lock(mutex); uint64_t total_counts = 0; auto SEARCH_RPS_KEY = SEARCH_LABEL + "_" + rps_key; auto SEARCH_LATENCY_KEY = SEARCH_LABEL + "_" + latency_key; auto IMPORT_RPS_KEY = IMPORT_LABEL + "_" + rps_key; auto IMPORT_LATENCY_KEY = IMPORT_LABEL + "_" + latency_key; auto DOC_WRITE_RPS_KEY = DOC_WRITE_LABEL + "_" + rps_key; auto DOC_WRITE_LATENCY_KEY = DOC_WRITE_LABEL + "_" + latency_key; auto DOC_DELETE_RPS_KEY = DOC_DELETE_LABEL + "_" + rps_key; auto DOC_DELETE_LATENCY_KEY = DOC_DELETE_LABEL + "_" + latency_key; auto OVERLOADED_RPS_KEY = OVERLOADED_LABEL + "_" + rps_key; result[rps_key] = nlohmann::json::object(); for(const auto& kv: *counts) { if(kv.first == SEARCH_LABEL) { result[SEARCH_RPS_KEY] = double(kv.second) / (METRICS_REFRESH_INTERVAL_MS / 1000); } else if(kv.first == IMPORT_LABEL) { result[IMPORT_RPS_KEY] = double(kv.second) / (METRICS_REFRESH_INTERVAL_MS / 1000); } else if(kv.first == DOC_WRITE_LABEL) { result[DOC_WRITE_RPS_KEY] = double(kv.second) / (METRICS_REFRESH_INTERVAL_MS / 1000); } else if(kv.first == DOC_DELETE_LABEL) { result[DOC_DELETE_RPS_KEY] = double(kv.second) / 
(METRICS_REFRESH_INTERVAL_MS / 1000); } else if(kv.first == OVERLOADED_LABEL) { result[OVERLOADED_RPS_KEY] = double(kv.second) / (METRICS_REFRESH_INTERVAL_MS / 1000); } else { result[rps_key][kv.first] = (double(kv.second) / (METRICS_REFRESH_INTERVAL_MS / 1000)); total_counts += kv.second; } } result["total_" + rps_key] = double(total_counts) / (METRICS_REFRESH_INTERVAL_MS / 1000); result[latency_key] = nlohmann::json::object(); for(const auto& kv: *durations) { auto counter_it = counts->find(kv.first); if(counter_it != counts->end() && counter_it->second != 0) { if(kv.first == SEARCH_LABEL) { result[SEARCH_LATENCY_KEY] = (double(kv.second) / counter_it->second); } else if(kv.first == IMPORT_LABEL) { result[IMPORT_LATENCY_KEY] = (double(kv.second) / counter_it->second); } else if(kv.first == DOC_WRITE_LABEL) { result[DOC_WRITE_LATENCY_KEY] = (double(kv.second) / counter_it->second); } else if(kv.first == DOC_DELETE_LABEL) { result[DOC_DELETE_LATENCY_KEY] = (double(kv.second) / counter_it->second); } else { result[latency_key][kv.first] = (double(kv.second) / counter_it->second); } } } std::vector<std::string> keys_to_check = { SEARCH_RPS_KEY, IMPORT_RPS_KEY, DOC_WRITE_RPS_KEY, DOC_DELETE_RPS_KEY, SEARCH_LATENCY_KEY, IMPORT_LATENCY_KEY, DOC_WRITE_LATENCY_KEY, DOC_DELETE_LATENCY_KEY, OVERLOADED_RPS_KEY }; for(auto& key: keys_to_check) { if(!result.contains(key)) { result[key] = 0; } } } void AppMetrics::window_reset() { std::unique_lock lock(mutex); delete counts; counts = current_counts; current_counts = new spp::sparse_hash_map<std::string, uint64_t>(); delete durations; durations = current_durations; current_durations = new spp::sparse_hash_map<std::string, uint64_t>(); } void AppMetrics::write_access_log(const uint64_t epoch_millis, const char* remote_ip, const std::string& path) { if(!access_log_path.empty()) { access_log << epoch_millis << "\t" << remote_ip << "\t" << path << "\n"; } } void AppMetrics::flush_access_log() { if(!access_log_path.empty()) { 
access_log << std::flush; } }
4,702
C++
.cpp
102
38.176471
112
0.613348
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,803
http_server.cpp
typesense_typesense/src/http_server.cpp
#include "http_data.h" #include "http_server.h" #include "string_utils.h" #include <regex> #include <thread> #include <signal.h> #include <h2o.h> #include <iostream> #include <auth_manager.h> #include <app_metrics.h> #include "raft_server.h" #include "logger.h" #include "ratelimit_manager.h" #include "sole.hpp" #include "core_api.h" HttpServer::HttpServer(const std::string & version, const std::string & listen_address, uint32_t listen_port, const std::string & ssl_cert_path, const std::string & ssl_cert_key_path, const uint64_t ssl_refresh_interval_ms, bool cors_enabled, const std::set<std::string>& cors_domains, ThreadPool* thread_pool): SSL_REFRESH_INTERVAL_MS(ssl_refresh_interval_ms), exit_loop(false), version(version), listen_address(listen_address), listen_port(listen_port), ssl_cert_path(ssl_cert_path), ssl_cert_key_path(ssl_cert_key_path), cors_enabled(cors_enabled), cors_domains(cors_domains), thread_pool(thread_pool) { accept_ctx = new h2o_accept_ctx_t(); h2o_config_init(&config); hostconf = h2o_config_register_host(&config, h2o_iovec_init(H2O_STRLIT("default")), 65535); register_handler(hostconf, "/", catch_all_handler); listener_socket = nullptr; // initialized later signal(SIGPIPE, SIG_IGN); h2o_context_init(&ctx, h2o_evloop_create(), &config); ctx.globalconf->server_name.base = nullptr; // initialized later message_dispatcher = new http_message_dispatcher; message_dispatcher->init(ctx.loop); // used during destructor ssl_refresh_timer.timer.expire_at = 0; metrics_refresh_timer.timer.expire_at = 0; meta_thread_pool = new ThreadPool(4); accept_ctx->ssl_ctx = nullptr; } void HttpServer::on_accept(h2o_socket_t *listener, const char *err) { HttpServer* http_server = reinterpret_cast<HttpServer*>(listener->data); h2o_socket_t *sock; if (err != NULL) { return; } if ((sock = h2o_evloop_socket_accept(listener)) == NULL) { return; } h2o_accept(http_server->accept_ctx, sock); } void HttpServer::on_metrics_refresh_timeout(h2o_timer_t *entry) { h2o_custom_timer_t* 
custom_timer = reinterpret_cast<h2o_custom_timer_t*>(entry); AppMetrics::get_instance().window_reset(); AppMetrics::get_instance().flush_access_log(); HttpServer *hs = static_cast<HttpServer*>(custom_timer->data); // link the timer for the next cycle h2o_timer_link( hs->ctx.loop, AppMetrics::METRICS_REFRESH_INTERVAL_MS, &hs->metrics_refresh_timer.timer ); } void HttpServer::on_ssl_refresh_timeout(h2o_timer_t *entry) { h2o_custom_timer_t* custom_timer = reinterpret_cast<h2o_custom_timer_t*>(entry); LOG(INFO) << "Refreshing SSL certs from disk."; HttpServer *hs = static_cast<HttpServer*>(custom_timer->data); SSL_CTX* old_ssl_ctx = hs->accept_ctx->ssl_ctx; bool refresh_success = initialize_ssl_ctx(hs->ssl_cert_path.c_str(), hs->ssl_cert_key_path.c_str(), hs->accept_ctx); if (refresh_success) { // delete the old SSL context but after some time, to allow existing connections to drain h2o_custom_timer_t* ssl_ctx_delete_timer = new h2o_custom_timer_t(old_ssl_ctx); h2o_timer_init(&ssl_ctx_delete_timer->timer, on_ssl_ctx_delete_timeout); uint64_t delete_lag = std::max<uint64_t>(60 * 1000, hs->SSL_REFRESH_INTERVAL_MS / 2); h2o_timer_link(hs->ctx.loop, delete_lag, &ssl_ctx_delete_timer->timer); } else { LOG(ERROR) << "SSL cert refresh failed."; } // link the timer for the next cycle h2o_timer_link(hs->ctx.loop, hs->SSL_REFRESH_INTERVAL_MS, &hs->ssl_refresh_timer.timer); } void HttpServer::on_ssl_ctx_delete_timeout(h2o_timer_t *entry) { LOG(INFO) << "Deleting old SSL context."; h2o_custom_timer_t* custom_timer = reinterpret_cast<h2o_custom_timer_t*>(entry); SSL_CTX* old_ssl_ctx = static_cast<SSL_CTX*>(custom_timer->data); SSL_CTX_free(old_ssl_ctx); delete custom_timer; } int HttpServer::setup_ssl(const char *cert_file, const char *key_file) { // Set up a timer to refresh SSL config from disk. 
Also, initializing upfront so that destructor works ssl_refresh_timer = h2o_custom_timer_t(this); h2o_timer_init(&ssl_refresh_timer.timer, on_ssl_refresh_timeout); h2o_timer_link(ctx.loop, SSL_REFRESH_INTERVAL_MS, &ssl_refresh_timer.timer); LOG(INFO) << "SSL cert refresh interval: " << (SSL_REFRESH_INTERVAL_MS / 1000) << "s"; if(!initialize_ssl_ctx(cert_file, key_file, accept_ctx)) { return -1; } return 0; } int HttpServer::create_listener() { struct sockaddr_in addr; int fd, reuseaddr_flag = 1; if(!ssl_cert_path.empty() && !ssl_cert_key_path.empty()) { int ssl_setup_code = setup_ssl(ssl_cert_path.c_str(), ssl_cert_key_path.c_str()); if(ssl_setup_code != 0) { return -1; } } ctx.globalconf->server_name = h2o_strdup(nullptr, "", SIZE_MAX); ctx.globalconf->http2.active_stream_window_size = ACTIVE_STREAM_WINDOW_SIZE; ctx.globalconf->http2.idle_timeout = REQ_TIMEOUT_MS; ctx.globalconf->max_request_entity_size = (size_t(10) * 1024 * 1024 * 1024); // 10 GB ctx.globalconf->http1.req_timeout = REQ_TIMEOUT_MS; ctx.globalconf->http1.req_io_timeout = REQ_TIMEOUT_MS; accept_ctx->ctx = &ctx; accept_ctx->hosts = config.hosts; memset(&addr, 0, sizeof(addr)); addr.sin_family = AF_INET; addr.sin_port = htons(listen_port); inet_pton(AF_INET, listen_address.c_str(), &(addr.sin_addr)); if ((fd = socket(AF_INET, SOCK_STREAM, 0)) == -1 || setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuseaddr_flag, sizeof(reuseaddr_flag)) != 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) != 0 || listen(fd, SOMAXCONN) != 0) { return -1; } listener_socket = h2o_evloop_socket_create(ctx.loop, fd, H2O_SOCKET_FLAG_DONT_READ); listener_socket->data = this; h2o_socket_read_start(listener_socket, on_accept); return 0; } int HttpServer::run(ReplicationState* replication_state) { this->replication_state = replication_state; metrics_refresh_timer = h2o_custom_timer_t(this); h2o_timer_init(&metrics_refresh_timer.timer, on_metrics_refresh_timeout); h2o_timer_link(ctx.loop, AppMetrics::METRICS_REFRESH_INTERVAL_MS, 
&metrics_refresh_timer.timer); if (create_listener() != 0) { LOG(ERROR) << "Failed to listen on " << listen_address << ":" << listen_port << " - " << strerror(errno); return 1; } else { LOG(INFO) << "Typesense has started listening on port " << listen_port; } message_dispatcher->on(STOP_SERVER_MESSAGE, HttpServer::on_stop_server); while(!exit_loop) { h2o_evloop_run(ctx.loop, INT32_MAX); } return 0; } bool HttpServer::on_stop_server(void *data) { // do nothing return true; } std::string HttpServer::get_version() { return version; } void HttpServer::clear_timeouts(const std::vector<h2o_timer_t*> & timers, bool trigger_callback) { for(h2o_timer_t* timer: timers) { h2o_timer_unlink(timer); } } void HttpServer::stop() { if(listener_socket != nullptr) { h2o_socket_read_stop(listener_socket); h2o_socket_close(listener_socket); } // this will break the event loop exit_loop = true; // send a message to activate the idle event loop to exit, just in case message_dispatcher->send_message(STOP_SERVER_MESSAGE, nullptr); } h2o_pathconf_t* HttpServer::register_handler(h2o_hostconf_t *hostconf, const char *path, int (*on_req)(h2o_handler_t *, h2o_req_t *)) { // See: https://github.com/h2o/h2o/issues/181#issuecomment-75393049 h2o_pathconf_t *pathconf = h2o_config_register_path(hostconf, path, 0); h2o_custom_req_handler_t *handler = reinterpret_cast<h2o_custom_req_handler_t*>(h2o_create_handler(pathconf, sizeof(*handler))); handler->http_server = this; handler->super.on_req = on_req; // Enable streaming request body handler->super.supports_request_streaming = 1; compress_args.min_size = 256; // don't gzip less than this size compress_args.brotli.quality = -1; // disable, not widely supported compress_args.gzip.quality = 1; // fastest h2o_compress_register(pathconf, &compress_args); return pathconf; } uint64_t HttpServer::find_route(const std::vector<std::string> & path_parts, const std::string & http_method, route_path** found_rpath) { for (const auto& index_route : 
route_hash_to_path) { const route_path & rpath = index_route.second; if(rpath.path_parts.size() != path_parts.size() || rpath.http_method != http_method) { continue; } bool found = true; for(size_t j = 0; j < rpath.path_parts.size(); j++) { const std::string & rpart = rpath.path_parts[j]; const std::string & given_part = path_parts[j]; if(rpart != given_part && rpart[0] != ':') { found = false; break; } } if(found) { *found_rpath = const_cast<route_path *>(&rpath); return index_route.first; } } return static_cast<uint64_t>(ROUTE_CODES::NOT_FOUND); } void HttpServer::on_res_generator_dispose(void *self) { //LOG(INFO) << "on_res_generator_dispose fires"; h2o_custom_generator_t* custom_generator = *static_cast<h2o_custom_generator_t**>(self); // locking to ensure dispose does not happen while the h2o req object is being written to { std::unique_lock lk(custom_generator->res()->mres); custom_generator->res()->final = true; custom_generator->res()->generator = nullptr; custom_generator->res()->is_alive = false; custom_generator->req()->is_diposed = true; custom_generator->req()->notify(); custom_generator->res()->notify(); } // without this, warning about memory allocated by std::string leaking happens delete custom_generator; } int HttpServer::catch_all_handler(h2o_handler_t *_h2o_handler, h2o_req_t *req) { h2o_custom_req_handler_t* h2o_handler = (h2o_custom_req_handler_t *)_h2o_handler; const std::string & http_method = std::string(req->method.base, req->method.len); const std::string & path = std::string(req->path.base, req->path.len); std::vector<std::string> path_with_query_parts; // These guards have been added to debug a strange issue of `path_with_query_parts` being empty sometimes if(req->path.len == 0 || path.empty()) { LOG(ERROR) << "Request path is empty: path.len=" << req->path.len << ", path: " << path; nlohmann::json resp; resp["message"] = "Request path is empty."; return send_response(req, 400, resp.dump()); } else { StringUtils::split(path, 
path_with_query_parts, "?"); if(path_with_query_parts.empty()) { LOG(ERROR) << "Request path is empty after splitting: path=" << path; nlohmann::json resp; resp["message"] = "Request path after splitting is empty."; return send_response(req, 400, resp.dump()); } } const std::string & path_without_query = path_with_query_parts[0]; std::string metric_identifier = http_method + " " + path_without_query; AppMetrics::get_instance().increment_count(metric_identifier, 1); std::string client_ip = http_req::get_ip_addr(req).ip; if(Config::get_instance().get_enable_access_logging()) { uint64_t now = std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::system_clock::now().time_since_epoch()).count(); auto epoch_millis = now / 1000; AppMetrics::get_instance().write_access_log(epoch_millis, client_ip.c_str(), metric_identifier); } // Handle CORS if(h2o_handler->http_server->cors_enabled) { h2o_iovec_t response_origin = {(char*)"*", 1}; if(!h2o_handler->http_server->cors_domains.empty()) { auto acl_origin_cursor = h2o_find_header(&req->headers, H2O_TOKEN_ORIGIN, -1); if(acl_origin_cursor != -1) { response_origin = req->headers.entries[acl_origin_cursor].value; std::string origin_str = std::string(response_origin.base, response_origin.len); if(h2o_handler->http_server->cors_domains.count(origin_str) == 0) { response_origin = {(char*)"", 0}; } } } if(response_origin.len != 0) { // only send header if origin matches or if wildcard allowed h2o_add_header_by_str(&req->pool, &req->res.headers, H2O_STRLIT("access-control-allow-origin"), 0, NULL, response_origin.base, response_origin.len); } if(http_method == "OPTIONS") { // locate request access control headers const char* ACL_REQ_HEADERS = "access-control-request-headers"; ssize_t acl_header_cursor = h2o_find_header_by_str(&req->headers, ACL_REQ_HEADERS, strlen(ACL_REQ_HEADERS), -1); if(acl_header_cursor != -1) { h2o_iovec_t &acl_req_headers = req->headers.entries[acl_header_cursor].value; h2o_generator_t generator = 
{NULL, NULL}; h2o_iovec_t res_body = h2o_strdup(&req->pool, "", SIZE_MAX); req->res.status = 200; req->res.reason = http_res::get_status_reason(200); h2o_add_header_by_str(&req->pool, &req->res.headers, H2O_STRLIT("access-control-allow-methods"), 0, NULL, H2O_STRLIT("POST, GET, DELETE, PUT, PATCH, OPTIONS")); h2o_add_header_by_str(&req->pool, &req->res.headers, H2O_STRLIT("access-control-allow-headers"), 0, NULL, acl_req_headers.base, acl_req_headers.len); h2o_add_header_by_str(&req->pool, &req->res.headers, H2O_STRLIT("access-control-max-age"), 0, NULL, H2O_STRLIT("3600")); h2o_start_response(req, &generator); h2o_send(req, &res_body, 1, H2O_SEND_STATE_FINAL); return 0; } } } std::vector<std::string> path_parts; StringUtils::split(path_without_query, path_parts, "/"); h2o_iovec_t query = req->query_at != SIZE_MAX ? h2o_iovec_init(req->path.base + req->query_at, req->path.len - req->query_at) : h2o_iovec_init(H2O_STRLIT("")); if(query.len > 4000 && http_method == "GET" && !path_parts.empty() && path_parts.back() == "search") { nlohmann::json resp; resp["message"] = "Query string exceeds max allowed length of 4000. Use the /multi_search end-point for larger payloads."; return send_response(req, 400, resp.dump()); } std::string query_str(query.base, query.len); std::map<std::string, std::string> query_map; StringUtils::parse_query_string(query_str, query_map); // cache ttl can be applied only from an embedded key: cannot be a get param query_map.erase("cache_ttl"); // Extract auth key from header. If that does not exist, look for a GET parameter. 
ssize_t auth_header_cursor = h2o_find_header_by_str(&req->headers, http_req::AUTH_HEADER, strlen(http_req::AUTH_HEADER), -1); std::string api_auth_key_sent; if(auth_header_cursor != -1) { h2o_iovec_t & slot = req->headers.entries[auth_header_cursor].value; api_auth_key_sent = std::string(slot.base, slot.len); } else if(query_map.count(http_req::AUTH_HEADER) != 0) { api_auth_key_sent = query_map[http_req::AUTH_HEADER]; } // extract user id from header, if not already present as GET param ssize_t user_header_cursor = h2o_find_header_by_str(&req->headers, http_req::USER_HEADER, strlen(http_req::USER_HEADER), -1); if(user_header_cursor != -1) { h2o_iovec_t & slot = req->headers.entries[user_header_cursor].value; std::string user_id_sent = std::string(slot.base, slot.len); query_map[http_req::USER_HEADER] = user_id_sent; } else if(query_map.count(http_req::USER_HEADER) == 0) { query_map[http_req::USER_HEADER] = client_ip; } route_path *rpath = nullptr; uint64_t route_hash = h2o_handler->http_server->find_route(path_parts, http_method, &rpath); if(route_hash == static_cast<uint64_t>(ROUTE_CODES::NOT_FOUND)) { std::string message = "{ \"message\": \"Not Found\"}"; return send_response(req, 404, message); } const std::string& root_resource = (path_parts.empty()) ? 
"" : path_parts[0]; //LOG(INFO) << "root_resource is: " << root_resource; bool needs_readiness_check = (root_resource == "collections") || !( root_resource == "health" || root_resource == "debug" || root_resource == "proxy" || root_resource == "stats.json" || root_resource == "metrics.json" || root_resource == "sequence" || root_resource == "operations" || root_resource == "config" || root_resource == "status" ); bool use_meta_thread_pool = (root_resource == "status"); if(needs_readiness_check) { bool write_op = is_write_request(root_resource, http_method, rpath->handler); bool read_op = !write_op; std::string message = "{ \"message\": \"Not Ready or Lagging\"}"; if(read_op && !h2o_handler->http_server->get_replication_state()->is_read_caught_up()) { return send_response(req, 503, message); } else if(write_op && !h2o_handler->http_server->get_replication_state()->is_write_caught_up()) { return send_response(req, 503, message); } } // iterate and extract path params for(size_t i = 0; i < rpath->path_parts.size(); i++) { const std::string & path_part = rpath->path_parts[i]; if(path_part[0] == ':') { std::string value = StringUtils::url_decode(path_parts[i]); query_map.emplace(path_part.substr(1), value); } } const std::string& body = std::string(req->entity.base, req->entity.len); std::vector<nlohmann::json> embedded_params_vec; if(RateLimitManager::getInstance()->is_rate_limited({RateLimitedEntityType::api_key, api_auth_key_sent}, {RateLimitedEntityType::ip, client_ip})) { std::string message = "{ \"message\": \"Rate limit exceeded or blocked\"}"; return send_response(req, 429, message); } bool is_multi_search_query = (root_resource == "multi_search"); if(Config::get_instance().get_enable_search_logging()) { std::string query_string = "?"; bool is_search_query = (is_multi_search_query || StringUtils::ends_with(path_without_query, "/documents/search")); if(is_search_query) { std::string search_payload; if(is_multi_search_query) { search_payload = body; 
StringUtils::erase_char(search_payload, '\n'); } // ignore params map of multi_search since it is mutated for every search object in the POST body for(const auto& kv: query_map) { if(kv.first != http_req::AUTH_HEADER) { query_string += kv.first + "=" + kv.second + "&"; } } std::string full_url_path = metric_identifier + query_string; // NOTE: we log the `body` ONLY for multi-search query LOG(INFO) << "event=search_request" << ", client_ip=" << client_ip << ", endpoint=" << full_url_path << ", body=" << (is_multi_search_query ? search_payload : ""); } } if(!is_multi_search_query) { // multi_search needs to be handled later because the API key could be part of request body and // the whole request body might not be available right now. bool authenticated = h2o_handler->http_server->auth_handler(query_map, embedded_params_vec, body, *rpath, api_auth_key_sent); if(!authenticated) { std::string message = std::string("{\"message\": \"Forbidden - a valid `") + http_req::AUTH_HEADER + "` header must be sent.\"}"; return send_response(req, 401, message); } } ssize_t content_type_header_cursor = h2o_find_header_by_str(&req->headers, http_req::CONTENT_TYPE_HEADER, strlen(http_req::CONTENT_TYPE_HEADER), -1); bool is_binary_body = false; if (content_type_header_cursor != -1) { h2o_iovec_t& slot = req->headers.entries[content_type_header_cursor].value; std::string content_type = std::string(slot.base, slot.len); is_binary_body = (content_type == http_req::OCTET_STREAM_HEADER_VALUE); } std::shared_ptr<http_req> request = std::make_shared<http_req>(req, rpath->http_method, path_without_query, route_hash, query_map, embedded_params_vec, api_auth_key_sent, body, client_ip, is_binary_body); // add custom generator with a dispose function for cleaning up resources h2o_custom_generator_t* custom_gen = new h2o_custom_generator_t; std::shared_ptr<http_res> response = std::make_shared<http_res>(custom_gen); custom_gen->h2o_generator = h2o_generator_t {response_proceed, response_abort}; 
custom_gen->request = request; custom_gen->response = response; custom_gen->rpath = rpath; custom_gen->h2o_handler = h2o_handler; h2o_custom_generator_t** allocated_generator = static_cast<h2o_custom_generator_t**>( h2o_mem_alloc_shared(&req->pool, sizeof(*allocated_generator), on_res_generator_dispose) ); *allocated_generator = custom_gen; // ensures that the first response need not wait for previous chunk to be done sending response->notify(); //LOG(INFO) << "Init res: " << custom_gen->response << ", ref count: " << custom_gen->response.use_count(); if(root_resource == "multi_search") { // format is <length of api_auth_key_sent>:<api_auth_key_sent><client_ip> std::string multi_search_key = std::to_string(api_auth_key_sent.length()) + ":" + api_auth_key_sent + client_ip; request->metadata = multi_search_key; } // routes match and is an authenticated request // do any additional pre-request middleware operations here if(rpath->action == "keys:create") { // we enrich incoming request with a random API key here so that leader and replicas will use the same key request->metadata = StringUtils::randstring(AuthManager::GENERATED_KEY_LEN); } if(rpath->action == "conversations/models:create") { try { nlohmann::json body_json = nlohmann::json::parse(request->body); if(body_json.count("id") != 0 && body_json["id"].is_string()) { request->metadata = body_json["id"].get<std::string>(); } else { request->metadata = sole::uuid4().str(); } } catch (const nlohmann::json::parse_error& e) { request->metadata = sole::uuid4().str(); } } if(req->proceed_req == nullptr) { // Full request body is already available, so we don't care if handler is async or not //LOG(INFO) << "Full request body is already available: " << req->entity.len; request->last_chunk_aggregate = true; return process_request(request, response, rpath, h2o_handler, use_meta_thread_pool); } else { // Only partial request body is available. 
// If rpath->async_req is true, the request handler function will be invoked multiple times, for each chunk //LOG(INFO) << "Partial request body length: " << req->entity.len; req->write_req.cb = async_req_cb; req->write_req.ctx = custom_gen; req->proceed_req(req, NULL); } return 0; } bool HttpServer::is_write_request(const std::string& root_resource, const std::string& http_method, bool (*rpath_handler)(const std::shared_ptr<http_req>&, const std::shared_ptr<http_res>&)) { if(http_method == "GET") { return false; } if(rpath_handler == post_create_event) { return false; } bool write_free_request = (root_resource == "multi_search" || root_resource == "proxy" || root_resource == "operations" || root_resource == "config"); if(!write_free_request && (http_method == "POST" || http_method == "PUT" || http_method == "DELETE" || http_method == "PATCH")) { return true; } return false; } int HttpServer::async_req_cb(void *ctx, int is_end_stream) { h2o_custom_generator_t* custom_generator = static_cast<h2o_custom_generator_t*>(ctx); const std::shared_ptr<http_req>& request = custom_generator->req(); const std::shared_ptr<http_res>& response = custom_generator->res(); h2o_iovec_t chunk = request->_req->entity; bool async_req = custom_generator->rpath->async_req; bool is_http_v1 = (0x101 <= request->_req->version && request->_req->version < 0x200); /*LOG(INFO) << "async_req_cb, chunk.len=" << chunk.len << ", is_http_v1: " << is_http_v1 << ", request->req->entity.len=" << request->_req->entity.len << ", content_len: " << request->_req->content_length << ", is_end_stream=" << is_end_stream;*/ // disallow specific curl clients from using import call via http2 // detects: https://github.com/curl/curl/issues/1410 if(!is_http_v1 && async_req && request->first_chunk_aggregate && request->chunk_len == 0 && request->path_without_query.find("import") != std::string::npos) { ssize_t agent_header_cursor = h2o_find_header_by_str(&request->_req->headers, http_req::AGENT_HEADER, 
strlen(http_req::AGENT_HEADER), -1); if(agent_header_cursor != -1) { h2o_iovec_t & slot = request->_req->headers.entries[agent_header_cursor].value; const std::string user_agent = std::string(slot.base, slot.len); if(user_agent.find("curl/") != std::string::npos) { std::string version_num; for(size_t i = 5; i < user_agent.size(); i++) { if(std::isdigit(user_agent[i])) { version_num += user_agent[i]; } } int major_version = version_num[0] - 48; // convert ascii char to integer if(major_version <= 7 && std::stoll(version_num) < 7710) { // allow >= v7.71.0 std::string message = "{ \"message\": \"HTTP2 is not supported by your curl client. " "You need to use atleast Curl v7.71.0.\"}"; h2o_iovec_t body = h2o_strdup(&request->_req->pool, message.c_str(), SIZE_MAX); request->_req->res.status = 400; request->_req->res.reason = http_res::get_status_reason(400); h2o_send(request->_req, &body, 1, H2O_SEND_STATE_ERROR); return 0; } } } } std::string chunk_str(chunk.base, chunk.len); request->body += chunk_str; request->chunk_len += chunk.len; /*LOG(INFO) << "entity: " << std::string(request->req->entity.base, std::min<size_t>(40, request->req->entity.len)) << ", chunk len: " << std::string(chunk.base, std::min<size_t>(40, chunk.len));*/ //std::this_thread::sleep_for(std::chrono::seconds(30)); //LOG(INFO) << "request->body.size(): " << request->body.size() << ", request->chunk_len=" << request->chunk_len; // LOG(INFO) << "req->entity.len: " << request->req->entity.len << ", request->chunk_len=" << request->chunk_len; bool exceeds_chunk_limit = (request->chunk_len >= ACTIVE_STREAM_WINDOW_SIZE); bool can_process_async = async_req && exceeds_chunk_limit; /*if(is_end_stream == 1) { LOG(INFO) << "is_end_stream=1"; }*/ // first let's handle the case where we are ready to fire the request handler if(can_process_async || is_end_stream) { // For async streaming requests, handler should be invoked for every aggregated chunk // For a non streaming request, buffer body and invoke only at 
the end if(request->first_chunk_aggregate) { request->first_chunk_aggregate = false; } // default value for last_chunk_aggregate is false request->last_chunk_aggregate = (is_end_stream == 1); process_request(request, response, custom_generator->rpath, custom_generator->h2o_handler, false); return 0; } request->_req->proceed_req(request->_req, NULL); return 0; } int HttpServer::process_request(const std::shared_ptr<http_req>& request, const std::shared_ptr<http_res>& response, route_path *rpath, const h2o_custom_req_handler_t *handler, const bool use_meta_thread_pool) { //LOG(INFO) << "process_request called"; const std::string& root_resource = (rpath->path_parts.empty()) ? "" : rpath->path_parts[0]; if(root_resource == "multi_search") { // We can authenticate only when the full request body is available bool authenticated = handler->http_server->auth_handler(request->params, request->embedded_params_vec, request->body, *rpath, request->api_auth_key); if(!authenticated) { std::string message = std::string("{\"message\": \"Forbidden - a valid `") + http_req::AUTH_HEADER + "` header must be sent.\"}"; return send_response(request->_req, 401, message); } } bool is_write = is_write_request(root_resource, rpath->http_method, rpath->handler); if(is_write) { handler->http_server->get_replication_state()->write(request, response); return 0; } auto message_dispatcher = handler->http_server->get_message_dispatcher(); auto thread_pool = use_meta_thread_pool ? 
handler->http_server->get_meta_thread_pool() : handler->http_server->get_thread_pool(); // LOG(INFO) << "Before enqueue res: " << response thread_pool->log_exhaustion(); thread_pool->enqueue([rpath, message_dispatcher, request, response]() { // call the API handler //LOG(INFO) << "Wait for response " << response.get() << ", action: " << rpath->_get_action(); (rpath->handler)(request, response); if(!rpath->async_res) { // lifecycle of non async res will be owned by stream responder auto req_res = new async_req_res_t(request, response, true); message_dispatcher->send_message(HttpServer::STREAM_RESPONSE_MESSAGE, req_res); } //LOG(INFO) << "Response done " << response.get(); }); return 0; } void HttpServer::on_deferred_process_request(h2o_timer_t *entry) { h2o_custom_timer_t* custom_timer = reinterpret_cast<h2o_custom_timer_t*>(entry); deferred_req_res_t* deferred_req_res = static_cast<deferred_req_res_t*>(custom_timer->data); //LOG(INFO) << "on_deferred_process_request " << deferred_req_res->req.get(); route_path* found_rpath = nullptr; deferred_req_res->server->get_route(deferred_req_res->req->route_hash, &found_rpath); const std::shared_ptr<http_req> request = deferred_req_res->req; const std::shared_ptr<http_res> response = deferred_req_res->res; HttpServer* server = deferred_req_res->server; // done with timer, so we can clear timer and data h2o_timer_unlink(&deferred_req_res->req->defer_timer.timer); delete deferred_req_res; request->defer_timer.data = nullptr; if(found_rpath) { // must be called on a separate thread so as not to block http thread server->thread_pool->enqueue([found_rpath, request, response]() { //LOG(INFO) << "Sleeping for 5s req count " << deferred_req_res->req.use_count(); //std::this_thread::sleep_for(std::chrono::seconds(5)); //LOG(INFO) << "on_deferred_process_request, calling handler, req use count " << request.use_count(); found_rpath->handler(request, response); }); } } void HttpServer::defer_processing(const std::shared_ptr<http_req>& 
req, const std::shared_ptr<http_res>& res, size_t timeout_ms) { //LOG(INFO) << "defer_processing, exit_loop: " << exit_loop << ", req: " << req.get() << ", use count: " << req.use_count(); if(req->defer_timer.data == nullptr) { //LOG(INFO) << "req->defer_timer.data is null"; auto deferred_req_res = new deferred_req_res_t(req, res, this, false); //LOG(INFO) << "req use count " << req.use_count(); req->defer_timer.data = deferred_req_res; h2o_timer_init(&req->defer_timer.timer, on_deferred_process_request); } else { // This should not happen as data is cleared when defer handler is run LOG(ERROR) << "HttpServer::defer_processing, timer data is NOT null"; h2o_timer_unlink(&req->defer_timer.timer); } h2o_timer_link(ctx.loop, timeout_ms, &req->defer_timer.timer); if(exit_loop) { // otherwise, replication thread could be stuck waiting on a future res->is_alive = false; req->notify(); res->notify(); } } void HttpServer::send_message(const std::string & type, void* data) { message_dispatcher->send_message(type, data); } int HttpServer::send_response(h2o_req_t *req, int status_code, const std::string & message) { h2o_generator_t generator = {nullptr, nullptr}; h2o_iovec_t body = h2o_strdup(&req->pool, message.c_str(), SIZE_MAX); req->res.status = status_code; req->res.reason = http_res::get_status_reason(req->res.status); h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_CONTENT_TYPE, nullptr, H2O_STRLIT("application/json; charset=utf-8")); h2o_start_response(req, &generator); h2o_send(req, &body, 1, H2O_SEND_STATE_FINAL); return 0; } void HttpServer::response_abort(h2o_generator_t *generator, h2o_req_t *req) { LOG(INFO) << "response_abort called"; h2o_custom_generator_t* custom_generator = reinterpret_cast<h2o_custom_generator_t*>(generator); custom_generator->res()->final = true; custom_generator->res()->is_alive = false; //LOG(INFO) << "response_abort: fulfilling req & res proceed."; } void HttpServer::response_proceed(h2o_generator_t *generator, h2o_req_t *req) { 
//LOG(INFO) << "response_proceed called"; h2o_custom_generator_t* custom_generator = reinterpret_cast<h2o_custom_generator_t*>(generator); //LOG(INFO) << "proxied_stream: " << custom_generator->response->proxied_stream; //LOG(INFO) << "response.final: " << custom_generator->response->final; custom_generator->res()->notify(); if(custom_generator->res()->proxied_stream) { // request progression should not be tied to response generation //LOG(INFO) << "Ignoring request proceed"; return ; } // if the request itself is NOT async, call the handler since it will be the handler that will be producing content // (streaming response but not request) if (!custom_generator->rpath->async_req) { // call the handler since it will be the handler that will be producing content custom_generator->h2o_handler->http_server->defer_processing(custom_generator->req(), custom_generator->res(), 1); } } void HttpServer::stream_response(stream_response_state_t& state) { // LOG(INFO) << "stream_response called"; //std::this_thread::sleep_for(std::chrono::milliseconds (5000)); // ***IMPORTANT*** // We must ensure that fields of `state.req` are not written to for preventing race conditions with indexing thread // Check `async_req_res_t` constructor for overlapping writes. h2o_req_t* req = state.get_req(); bool start_of_res = (req->res.status == 0); if(start_of_res) { h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_CONTENT_TYPE, NULL, state.res_content_type.data(), state.res_content_type.size()); req->res.status = (state.status == 0 && state.send_state != H2O_SEND_STATE_FINAL) ? 
200 : state.status; req->res.reason = state.reason; } if(state.is_req_early_exit) { // premature termination of async request: handle this explicitly as otherwise, request is not being closed LOG(INFO) << "Premature termination of async request."; if (req->_generator == nullptr) { h2o_start_response(req, state.generator); } h2o_send(req, &state.res_buff, 1, H2O_SEND_STATE_FINAL); h2o_dispose_request(req); return ; } if (start_of_res) { /*LOG(INFO) << "h2o_start_response, content_type=" << state.res_content_type << ",response.status_code=" << state.res_status_code;*/ h2o_start_response(req, state.generator); } if(state.res_buff.len == 0 && state.send_state != H2O_SEND_STATE_FINAL) { // without this guard, http streaming will break state.generator->proceed(state.generator, req); return; } h2o_send(req, &state.res_buff, 1, state.send_state); //LOG(INFO) << "stream_response after send"; } void HttpServer::set_auth_handler(bool (*handler)(std::map<std::string, std::string>& params, std::vector<nlohmann::json>& embedded_params_vec, const std::string& body, const route_path& rpath, const std::string& auth_key)) { auth_handler = handler; } void HttpServer::get(const std::string & path, bool (*handler)(const std::shared_ptr<http_req>&, const std::shared_ptr<http_res>&), bool async_req, bool async_res) { std::vector<std::string> path_parts; StringUtils::split(path, path_parts, "/"); route_path rpath("GET", path_parts, handler, async_req, async_res); route_hash_to_path.emplace_back(rpath.route_hash(), rpath); route_hash_to_path_map.emplace(rpath.route_hash(), rpath); } void HttpServer::post(const std::string & path, bool (*handler)(const std::shared_ptr<http_req>&, const std::shared_ptr<http_res>&), bool async_req, bool async_res) { std::vector<std::string> path_parts; StringUtils::split(path, path_parts, "/"); route_path rpath("POST", path_parts, handler, async_req, async_res); route_hash_to_path.emplace_back(rpath.route_hash(), rpath); 
route_hash_to_path_map.emplace(rpath.route_hash(), rpath); } void HttpServer::put(const std::string & path, bool (*handler)(const std::shared_ptr<http_req>&, const std::shared_ptr<http_res>&), bool async_req, bool async_res) { std::vector<std::string> path_parts; StringUtils::split(path, path_parts, "/"); route_path rpath("PUT", path_parts, handler, async_req, async_res); route_hash_to_path.emplace_back(rpath.route_hash(), rpath); route_hash_to_path_map.emplace(rpath.route_hash(), rpath); } void HttpServer::patch(const std::string & path, bool (*handler)(const std::shared_ptr<http_req>&, const std::shared_ptr<http_res>&), bool async_req, bool async_res) { std::vector<std::string> path_parts; StringUtils::split(path, path_parts, "/"); route_path rpath("PATCH", path_parts, handler, async_req, async_res); route_hash_to_path.emplace_back(rpath.route_hash(), rpath); route_hash_to_path_map.emplace(rpath.route_hash(), rpath); } void HttpServer::del(const std::string & path, bool (*handler)(const std::shared_ptr<http_req>&, const std::shared_ptr<http_res>&), bool async_req, bool async_res) { std::vector<std::string> path_parts; StringUtils::split(path, path_parts, "/"); route_path rpath("DELETE", path_parts, handler, async_req, async_res); route_hash_to_path.emplace_back(rpath.route_hash(), rpath); route_hash_to_path_map.emplace(rpath.route_hash(), rpath); } void HttpServer::on(const std::string & message, bool (*handler)(void*)) { message_dispatcher->on(message, handler); } HttpServer::~HttpServer() { delete message_dispatcher; if(ssl_refresh_timer.timer.expire_at != 0) { // avoid callback since it recreates timeout clear_timeouts({&ssl_refresh_timer.timer}, false); } if(metrics_refresh_timer.timer.expire_at != 0) { // avoid callback since it recreates timeout clear_timeouts({&metrics_refresh_timer.timer}, false); } h2o_timerwheel_run(ctx.loop->_timeouts, 9999999999999); h2o_context_dispose(&ctx); if(ctx.globalconf->server_name.base != nullptr) { 
free(ctx.globalconf->server_name.base); ctx.globalconf->server_name.base = nullptr; } // Flaky, sometimes assertion on timeouts occur, preventing a clean shutdown //h2o_evloop_destroy(ctx.loop); h2o_config_dispose(&config); SSL_CTX_free(accept_ctx->ssl_ctx); delete accept_ctx; meta_thread_pool->shutdown(); delete meta_thread_pool; } http_message_dispatcher* HttpServer::get_message_dispatcher() const { return message_dispatcher; } ReplicationState* HttpServer::get_replication_state() const { return replication_state; } bool HttpServer::is_alive() const { return replication_state->is_alive(); } bool HttpServer::get_route(uint64_t hash, route_path** found_rpath) { auto route_hash_it = route_hash_to_path_map.find(hash); if(route_hash_it == route_hash_to_path_map.end()) { return false; } *found_rpath = &route_hash_it->second; return true; } uint64_t HttpServer::node_state() const { return replication_state->node_state(); } nlohmann::json HttpServer::node_status() { return replication_state->get_status(); } bool HttpServer::on_stream_response_message(void *data) { //LOG(INFO) << "on_stream_response_message"; auto req_res = static_cast<async_req_res_t *>(data); // NOTE: access to `req` and `res` objects must be synchronized and wrapped by `req_res` if(req_res->is_alive()) { stream_response(req_res->get_res_state()); } else { // serialized request or generator has been disposed (underlying request is probably dead) req_res->req_notify(); req_res->res_notify(); } if(req_res->destroy_after_use) { delete req_res; } return true; } bool HttpServer::on_request_proceed_message(void *data) { //LOG(INFO) << "on_request_proceed_message"; // This callback will run concurrently to batch indexer's run() so care must be taken to protect access // to variables that are written to by the batch indexer, which for now is only: last_chunk_aggregate (atomic) deferred_req_res_t* req_res = static_cast<deferred_req_res_t *>(data); if(req_res->res->is_alive) { auto stream_state = 
(req_res->req->last_chunk_aggregate) ? H2O_SEND_STATE_FINAL : H2O_SEND_STATE_IN_PROGRESS; size_t written = req_res->req->chunk_len; req_res->req->chunk_len = 0; if(req_res->req->_req && req_res->req->_req->proceed_req) { req_res->req->_req->proceed_req(req_res->req->_req, NULL); } } if(req_res->destroy_after_use) { delete req_res; } return true; } bool HttpServer::on_deferred_processing_message(void *data) { //LOG(INFO) << "on_deferred_processing_message"; defer_processing_t* defer = static_cast<defer_processing_t *>(data); //LOG(INFO) << "defer req count: " << defer->req.use_count(); defer->server->defer_processing(defer->req, defer->res, defer->timeout_ms); //LOG(INFO) << "req use count: " << defer->req.use_count() << ", req " << defer->req.get(); delete defer; return true; } bool HttpServer::has_exited() const { return exit_loop; } void HttpServer::do_snapshot(const std::string& snapshot_path, const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) { return replication_state->do_snapshot(snapshot_path, req, res); } bool HttpServer::trigger_vote() { return replication_state->trigger_vote(); } bool HttpServer::reset_peers() { return replication_state->reset_peers(); } ThreadPool* HttpServer::get_thread_pool() const { return thread_pool; } bool HttpServer::initialize_ssl_ctx(const char *cert_file, const char *key_file, h2o_accept_ctx_t* accept_ctx) { SSL_CTX* new_ctx = SSL_CTX_new(SSLv23_server_method()); // As recommended by: // https://github.com/ssllabs/research/wiki/SSL-and-TLS-Deployment-Best-Practices#23-use-secure-cipher-suites SSL_CTX_set_cipher_list(new_ctx, "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:" "ECDHE-ECDSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-ECDSA-AES256-SHA384:" "ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:" "ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:" 
"DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA256"); // Without this, DH and ECDH ciphers will be ignored by OpenSSL int nid = NID_X9_62_prime256v1; EC_KEY *key = EC_KEY_new_by_curve_name(nid); if (key == nullptr) { LOG(ERROR) << "Failed to create DH/ECDH."; return -1; } SSL_CTX_set_tmp_ecdh(new_ctx, key); EC_KEY_free(key); SSL_CTX_set_options(new_ctx, SSL_OP_NO_SSLv2); SSL_CTX_set_options(new_ctx, SSL_OP_NO_SSLv3); SSL_CTX_set_options(new_ctx, SSL_OP_NO_TLSv1); SSL_CTX_set_options(new_ctx, SSL_OP_NO_TLSv1_1); SSL_CTX_set_options(new_ctx, SSL_OP_SINGLE_ECDH_USE); if (SSL_CTX_use_certificate_chain_file(new_ctx, cert_file) != 1) { LOG(ERROR) << "An error occurred while trying to load server certificate file: " << cert_file; SSL_CTX_free(new_ctx); return false; } if (SSL_CTX_use_PrivateKey_file(new_ctx, key_file, SSL_FILETYPE_PEM) != 1) { LOG(ERROR) << "An error occurred while trying to load private key file: " << key_file; SSL_CTX_free(new_ctx); return false; } if(SSL_CTX_check_private_key(new_ctx) != 1) { LOG(ERROR) << "Private key validation failed for: " << key_file; SSL_CTX_free(new_ctx); return false; } h2o_ssl_register_alpn_protocols(new_ctx, h2o_http2_alpn_protocols); accept_ctx->ssl_ctx = new_ctx; return true; } void HttpServer::persist_applying_index() { return replication_state->persist_applying_index(); } int64_t HttpServer::get_num_queued_writes() { return replication_state->get_num_queued_writes(); } bool HttpServer::is_leader() const { return replication_state->is_leader(); } ThreadPool* HttpServer::get_meta_thread_pool() const { return meta_thread_pool; } void HttpServer::decr_pending_writes() { return replication_state->decr_pending_writes(); }
48,066
C++
.cpp
920
43.776087
167
0.625413
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,804
tokenizer.cpp
typesense_typesense/src/tokenizer.cpp
#include <sstream> #include <algorithm> #include <string_utils.h> #include "tokenizer.h" #include <unicode/uchar.h> Tokenizer::Tokenizer(const std::string& input, bool normalize, bool no_op, const std::string& locale, const std::vector<char>& symbols_to_index, const std::vector<char>& separators, std::shared_ptr<Stemmer> stemmer) : i(0), normalize(normalize), no_op(no_op), locale(locale), stemmer(stemmer) { for(char c: symbols_to_index) { index_symbols[uint8_t(c)] = 1; } for(char c: separators) { separator_symbols[uint8_t(c)] = 1; } UErrorCode errcode = U_ZERO_ERROR; if(locale == "ko") { nfkd = icu::Normalizer2::getNFKDInstance(errcode); } if(locale == "th") { nfkc = icu::Normalizer2::getNFKCInstance(errcode); } cd = iconv_open("ASCII//TRANSLIT", "UTF-8"); init(input); } void Tokenizer::init(const std::string& input) { // init() can be called multiple times safely without leaking memory as we check for prior initialization if(normalized_text) { free(normalized_text); normalized_text = nullptr; } if(locale == "zh") { UErrorCode translit_status = U_ZERO_ERROR; if(!transliterator) { transliterator = icu::Transliterator::createInstance("Traditional-Simplified", UTRANS_FORWARD, translit_status); } if(U_FAILURE(translit_status)) { //LOG(ERROR) << "Unable to create transliteration instance for `zh` locale."; transliterator = nullptr; text = input; } else { icu::UnicodeString unicode_input = icu::UnicodeString::fromUTF8(input); transliterator->transliterate(unicode_input); std::string output; unicode_input.toUTF8String(output); normalized_text = (char *)malloc(output.size()+1); strcpy(normalized_text, output.c_str()); text = normalized_text; } } else if(locale == "ja") { if(normalize) { normalized_text = JapaneseLocalizer::get_instance().normalize(input); text = normalized_text; } else { text = input; } } else if(is_cyrillic(locale)) { // init transliterator but will only transliterate during tokenization UErrorCode translit_status = U_ZERO_ERROR; if(!transliterator) { 
transliterator = icu::Transliterator::createInstance("Any-Latin; Latin-ASCII", UTRANS_FORWARD, translit_status); } text = input; } else { text = input; } if(!locale.empty() && locale != "en") { UErrorCode status = U_ZERO_ERROR; const icu::Locale& icu_locale = icu::Locale(locale.c_str()); if(!bi) { bi = icu::BreakIterator::createWordInstance(icu_locale, status); } unicode_text = icu::UnicodeString::fromUTF8(text); if(locale == "fa") { icu::UnicodeString target_str; target_str.setTo(0x200C); // U+200C (ZERO WIDTH NON-JOINER) unicode_text.findAndReplace(target_str, " "); } bi->setText(unicode_text); start_pos = bi->first(); end_pos = bi->next(); utf8_start_index = 0; } } bool Tokenizer::belongs_to_general_punctuation_unicode_block(UChar c) { UBlockCode blockCode = ublock_getCode(c); return blockCode == UBLOCK_GENERAL_PUNCTUATION; } bool Tokenizer::next(std::string &token, size_t& token_index, size_t& start_index, size_t& end_index) { if(no_op) { if(i == text.size()) { return false; } token = text; i = text.size(); start_index = 0; end_index = text.size() - 1; return true; } if(!locale.empty() && locale != "en") { while (end_pos != icu::BreakIterator::DONE) { //LOG(INFO) << "Position: " << start_pos; std::string word; if(locale == "ko") { UErrorCode errcode = U_ZERO_ERROR; icu::UnicodeString src = unicode_text.tempSubStringBetween(start_pos, end_pos); icu::UnicodeString dst; nfkd->normalize(src, dst, errcode); if(!U_FAILURE(errcode)) { dst.toUTF8String(word); } else { LOG(ERROR) << "Unicode error during parsing: " << errcode; } } else if(normalize && is_cyrillic(locale)) { auto raw_text = unicode_text.tempSubStringBetween(start_pos, end_pos); if(stemmer) { std::string stemmed_word; raw_text.toUTF8String(stemmed_word); stemmed_word = stemmer->stem(stemmed_word); raw_text = icu::UnicodeString::fromUTF8(stemmed_word); } transliterator->transliterate(raw_text); raw_text.toUTF8String(word); StringUtils::replace_all(word, "\"", ""); } else if(normalize && locale == "th") { 
UErrorCode errcode = U_ZERO_ERROR; icu::UnicodeString src = unicode_text.tempSubStringBetween(start_pos, end_pos); icu::UnicodeString dst; nfkc->normalize(src, dst, errcode); if(!U_FAILURE(errcode)) { icu::UnicodeString transformedString; for (int32_t t = 0; t < dst.length(); t++) { if (!belongs_to_general_punctuation_unicode_block(dst[t])) { transformedString += dst[t]; } } transformedString.toUTF8String(word); } else { LOG(ERROR) << "Unicode error during parsing: " << errcode; } } else if(normalize && locale == "ja") { auto raw_text = unicode_text.tempSubStringBetween(start_pos, end_pos); raw_text.toUTF8String(word); char* normalized_word = JapaneseLocalizer::get_instance().normalize(word); word.assign(normalized_word, strlen(normalized_word)); free(normalized_word); } else { unicode_text.tempSubStringBetween(start_pos, end_pos).foldCase().toUTF8String(word); } bool emit_token = false; size_t orig_word_size = word.size(); if(locale == "zh" && (word == "," || word == "─" || word == "。")) { emit_token = false; } else if(locale == "ko" && word == "·") { emit_token = false; } else { // Some special characters like punctuations arrive as independent units, while others like // underscore and quotes are present within the string. We will have to handle both cases. 
size_t read_index = 0, write_index = 0; while (read_index < word.size()) { size_t this_stream_mode = get_stream_mode(word[read_index]); if (!is_ascii_char(word[read_index]) || this_stream_mode == INDEX) { word[write_index++] = std::tolower(word[read_index]); } read_index++; } // resize to fit new length word.resize(write_index); if(!word.empty()) { out += word; emit_token = true; } } if(emit_token) { token = out; token_index = token_counter++; out.clear(); } start_index = utf8_start_index; end_index = utf8_start_index + orig_word_size - 1; utf8_start_index = end_index + 1; start_pos = end_pos; end_pos = bi->next(); if(emit_token) { return true; } } if(stemmer && !is_cyrillic(locale)) { // cyrillic is already stemmed prior to transliteration token = stemmer->stem(out); } else { token = out; } out.clear(); start_index = utf8_start_index; end_index = text.size() - 1; if(token.empty()) { return false; } token_index = token_counter++; return true; } while(i < text.length()) { if(is_ascii_char(text[i])) { size_t this_stream_mode = get_stream_mode(text[i]); if(this_stream_mode == SKIP) { i++; continue; } if(this_stream_mode == SEPARATE) { if(out.empty()) { i++; continue; } if(stemmer) { token = stemmer->stem(out); } else { token = out; } out.clear(); token_index = token_counter++; end_index = i - 1; i++; return true; } else { if(out.empty()) { start_index = i; } out += normalize ? 
char(std::tolower(text[i])) : text[i]; i++; continue; } } if(out.empty()) { start_index = i; } char inbuf[5]; char *p = inbuf; // group bytes to form a unicode representation *p++ = text[i++]; if ((text[i] & 0xC0) == 0x80) *p++ = text[i++]; if ((text[i] & 0xC0) == 0x80) *p++ = text[i++]; if ((text[i] & 0xC0) == 0x80) *p++ = text[i++]; *p = 0; size_t insize = (p - &inbuf[0]); if(!normalize) { out += inbuf; continue; } char outbuf[5] = {}; size_t outsize = sizeof(outbuf); char *outptr = outbuf; char *inptr = inbuf; //printf("[%s]\n", inbuf); errno = 0; iconv(cd, &inptr, &insize, &outptr, &outsize); // this can be handled by ICU via "Latin-ASCII" if(errno == EILSEQ) { // symbol cannot be represented as ASCII, so write the original symbol out += inbuf; } else { for(size_t out_index=0; out_index<5; out_index++) { if(!normalize) { out += outbuf[out_index]; continue; } bool unicode_is_ascii = is_ascii_char(outbuf[out_index]); bool keep_char = !unicode_is_ascii || std::isalnum(outbuf[out_index]); if(keep_char) { if(unicode_is_ascii && std::isalnum(outbuf[out_index])) { outbuf[out_index] = char(std::tolower(outbuf[out_index])); } out += outbuf[out_index]; } } } } if(stemmer) { token = stemmer->stem(out); } else { token = out; } out.clear(); end_index = i - 1; if(token.empty()) { return false; } token_index = token_counter++; return true; } void Tokenizer::tokenize(std::vector<std::string> &tokens) { std::string token; size_t token_index; while(next(token, token_index)) { tokens.push_back(token); } } bool Tokenizer::tokenize(std::string& token) { size_t token_index = 0; init(token); return next(token, token_index); } bool Tokenizer::next(std::string &token, size_t &token_index) { size_t start_index = 0, end_index = 0; return next(token, token_index, start_index, end_index); } bool Tokenizer::is_cyrillic(const std::string& locale) { return locale == "el" || locale == "bg" || locale == "ru" || locale == "sr" || locale == "uk" || locale == "be"; } void 
Tokenizer::decr_token_counter() { if(token_counter > 0) { token_counter--; } } bool Tokenizer::should_skip_char(char c) { return is_ascii_char(c) && get_stream_mode(c) != INDEX; } std::string Tokenizer::normalize_ascii_no_spaces(const std::string& text) { std::string analytics_query = text; StringUtils::trim(analytics_query); for(size_t i = 0; i < analytics_query.size(); i++) { if(is_ascii_char(text[i])) { analytics_query[i] = std::tolower(analytics_query[i]); } } return analytics_query; } bool Tokenizer::has_word_tokenizer(const std::string& locale) { bool use_word_tokenizer = locale == "th" || locale == "ja" || Tokenizer::is_cyrillic(locale); return use_word_tokenizer; }
12,931
C++
.cpp
334
26.934132
109
0.5123
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,805
filter.cpp
typesense_typesense/src/filter.cpp
#include <collection_manager.h> #include <posting.h> #include <timsort.hpp> #include <stack> #include "filter.h" Option<bool> filter::validate_numerical_filter_value(field _field, const string &raw_value) { if(_field.is_int32()) { if (!StringUtils::is_integer(raw_value)) { return Option<bool>(400, "Error with filter field `" + _field.name + "`: Not an int32."); } else if (!StringUtils::is_int32_t(raw_value)) { return Option<bool>(400, "Error with filter field `" + _field.name + "`: `" + raw_value + "` exceeds the range of an int32."); } return Option<bool>(true); } else if(_field.is_int64() && !StringUtils::is_int64_t(raw_value)) { return Option<bool>(400, "Error with filter field `" + _field.name + "`: Not an int64."); } else if(_field.is_float() && !StringUtils::is_float(raw_value)) { return Option<bool>(400, "Error with filter field `" + _field.name + "`: Not a float."); } return Option<bool>(true); } Option<NUM_COMPARATOR> filter::extract_num_comparator(string &comp_and_value) { auto num_comparator = EQUALS; if(StringUtils::is_integer(comp_and_value) || StringUtils::is_float(comp_and_value)) { num_comparator = EQUALS; } // the ordering is important - we have to compare 2-letter operators first else if(comp_and_value.compare(0, 2, "<=") == 0) { num_comparator = LESS_THAN_EQUALS; } else if(comp_and_value.compare(0, 2, ">=") == 0) { num_comparator = GREATER_THAN_EQUALS; } else if(comp_and_value.compare(0, 2, "!=") == 0) { num_comparator = NOT_EQUALS; } else if(comp_and_value.compare(0, 1, "<") == 0) { num_comparator = LESS_THAN; } else if(comp_and_value.compare(0, 1, ">") == 0) { num_comparator = GREATER_THAN; } // "=" case is handled upstream. 
else if(comp_and_value.find("..") != std::string::npos) { num_comparator = RANGE_INCLUSIVE; } else { return Option<NUM_COMPARATOR>(400, "Numerical field has an invalid comparator."); } if(num_comparator == LESS_THAN || num_comparator == GREATER_THAN) { comp_and_value = comp_and_value.substr(1); } else if(num_comparator == LESS_THAN_EQUALS || num_comparator == GREATER_THAN_EQUALS || num_comparator == NOT_EQUALS) { comp_and_value = comp_and_value.substr(2); } comp_and_value = StringUtils::trim(comp_and_value); return Option<NUM_COMPARATOR>(num_comparator); } Option<bool> filter::parse_geopoint_filter_value(std::string& raw_value, const std::string& format_err_msg, std::string& processed_filter_val, NUM_COMPARATOR& num_comparator) { num_comparator = LESS_THAN_EQUALS; if(!(raw_value[0] == '(' && raw_value[raw_value.size() - 1] == ')')) { return Option<bool>(400, format_err_msg); } std::vector<std::string> filter_values; auto raw_val_without_paran = raw_value.substr(1, raw_value.size() - 2); StringUtils::split(raw_val_without_paran, filter_values, ","); // we will end up with: "10.45 34.56 2 km" or "10.45 34.56 2mi" or a geo polygon if(filter_values.size() < 3) { return Option<bool>(400, format_err_msg); } // do validation: format should match either a point + radius or polygon size_t num_floats = 0; for(const auto& fvalue: filter_values) { if(StringUtils::is_float(fvalue)) { num_floats++; } } bool is_polygon = (num_floats == filter_values.size()); if(!is_polygon) { // we have to ensure that this is a point + radius match if(!StringUtils::is_float(filter_values[0]) || !StringUtils::is_float(filter_values[1])) { return Option<bool>(400, format_err_msg); } if(filter_values[0] == "nan" || filter_values[0] == "NaN" || filter_values[1] == "nan" || filter_values[1] == "NaN") { return Option<bool>(400, format_err_msg); } } if(is_polygon) { processed_filter_val = raw_val_without_paran; } else { // point + radius // filter_values[2] is distance, get the unit, validate it and 
split on that if(filter_values[2].size() < 2) { return Option<bool>(400, "Unit must be either `km` or `mi`."); } std::string unit = filter_values[2].substr(filter_values[2].size()-2, 2); if(unit != "km" && unit != "mi") { return Option<bool>(400, "Unit must be either `km` or `mi`."); } std::vector<std::string> dist_values; StringUtils::split(filter_values[2], dist_values, unit); if(dist_values.size() != 1) { return Option<bool>(400, format_err_msg); } if(!StringUtils::is_float(dist_values[0])) { return Option<bool>(400, format_err_msg); } processed_filter_val = filter_values[0] + ", " + filter_values[1] + ", " + // co-ords dist_values[0] + ", " + unit; // X km } return Option<bool>(true); } Option<bool> validate_geofilter_distance(std::string& raw_value, const string& format_err_msg, std::string& distance, std::string& unit) { if (raw_value.size() < 2) { return Option<bool>(400, "Unit must be either `km` or `mi`."); } unit = raw_value.substr(raw_value.size() - 2, 2); if (unit != "km" && unit != "mi") { return Option<bool>(400, "Unit must be either `km` or `mi`."); } std::vector<std::string> dist_values; StringUtils::split(raw_value, dist_values, unit); if (dist_values.size() != 1) { return Option<bool>(400, format_err_msg); } if (!StringUtils::is_float(dist_values[0])) { return Option<bool>(400, format_err_msg); } distance = std::string(dist_values[0]); return Option<bool>(true); } Option<bool> filter::parse_geopoint_filter_value(string& raw_value, const string& format_err_msg, filter& filter_exp) { // FORMAT: // [ ([48.853, 2.344], radius: 1km, exact_filter_radius: 100km), // ([48.8662, 2.3255, 48.8581, 2.3209, 48.8561, 2.3448, 48.8641, 2.3469], exact_filter_radius: 100km) ] // Every open parenthesis represent a geo filter value. 
auto open_parenthesis_count = std::count(raw_value.begin(), raw_value.end(), '('); if (open_parenthesis_count < 1) { return Option<bool>(400, format_err_msg); } filter_exp.comparators.push_back(LESS_THAN_EQUALS); bool is_multivalued = raw_value[0] == '['; size_t i = is_multivalued; for (auto j = 0; j < open_parenthesis_count; j++) { if (is_multivalued) { auto pos = raw_value.find('(', i); if (pos == std::string::npos) { return Option<bool>(400, format_err_msg); } i = pos; } i++; if (i >= raw_value.size()) { return Option<bool>(400, format_err_msg); } auto value_end_index = raw_value.find(')', i); if (value_end_index == std::string::npos) { return Option<bool>(400, format_err_msg); } // [48.853, 2.344], radius: 1km, exact_filter_radius: 100km // [48.8662, 2.3255, 48.8581, 2.3209, 48.8561, 2.3448, 48.8641, 2.3469], exact_filter_radius: 100km std::string value_str = raw_value.substr(i, value_end_index - i); StringUtils::trim(value_str); if (value_str.empty() || value_str[0] != '[' || value_str.find(']', 1) == std::string::npos) { return Option<bool>(400, format_err_msg); } auto points_str = value_str.substr(1, value_str.find(']', 1) - 1); std::vector<std::string> geo_points; StringUtils::split(points_str, geo_points, ","); if (geo_points.size() < 2 || geo_points.size() % 2) { return Option<bool>(400, format_err_msg); } bool is_polygon = geo_points.size() > 2; for (const auto& geo_point: geo_points) { if (geo_point == "nan" || geo_point == "NaN" || !StringUtils::is_float(geo_point)) { return Option<bool>(400, format_err_msg); } } if (is_polygon) { filter_exp.values.push_back(points_str); } // Handle options. 
// , radius: 1km, exact_filter_radius: 100km i = raw_value.find(']', i) + 1; std::vector<std::string> options; StringUtils::split(raw_value.substr(i, value_end_index - i), options, ","); if (options.empty()) { if (!is_polygon) { // Missing radius option return Option<bool>(400, format_err_msg); } nlohmann::json param; param[EXACT_GEO_FILTER_RADIUS_KEY] = DEFAULT_EXACT_GEO_FILTER_RADIUS_VALUE; filter_exp.params.push_back(param); continue; } bool is_radius_present = false; for (auto const& option: options) { std::vector<std::string> key_value; StringUtils::split(option, key_value, ":"); if (key_value.size() < 2) { continue; } if (key_value[0] == GEO_FILTER_RADIUS_KEY && !is_polygon) { is_radius_present = true; std::string distance, unit; auto validate_op = validate_geofilter_distance(key_value[1], format_err_msg, distance, unit); if (!validate_op.ok()) { return validate_op; } filter_exp.values.push_back(points_str + ", " + distance + ", " + unit); } else if (key_value[0] == EXACT_GEO_FILTER_RADIUS_KEY) { std::string distance, unit; auto validate_op = validate_geofilter_distance(key_value[1], format_err_msg, distance, unit); if (!validate_op.ok()) { return validate_op; } double exact_under_radius = std::stof(distance); if (unit == "km") { exact_under_radius *= 1000; } else { // assume "mi" (validated upstream) exact_under_radius *= 1609.34; } nlohmann::json param; param[EXACT_GEO_FILTER_RADIUS_KEY] = exact_under_radius; filter_exp.params.push_back(param); // Only EXACT_GEO_FILTER_RADIUS_KEY option would be present for a polygon. We can also stop if we've // parsed the radius in case of a single geopoint since there are only two options. if (is_polygon || is_radius_present) { break; } } } if (!is_radius_present && !is_polygon) { return Option<bool>(400, format_err_msg); } // EXACT_GEO_FILTER_RADIUS_KEY was not present. 
if (filter_exp.params.size() < filter_exp.values.size()) { nlohmann::json param; param[EXACT_GEO_FILTER_RADIUS_KEY] = DEFAULT_EXACT_GEO_FILTER_RADIUS_VALUE; filter_exp.params.push_back(param); } } return Option<bool>(true); } bool isOperator(const std::string& expression) { return expression == "&&" || expression == "||"; } // https://en.wikipedia.org/wiki/Shunting_yard_algorithm Option<bool> toPostfix(std::queue<std::string>& tokens, std::queue<std::string>& postfix) { std::stack<std::string> operatorStack; while (!tokens.empty()) { auto expression = tokens.front(); tokens.pop(); if (isOperator(expression)) { // We only have two operators &&, || having the same precedence and both being left associative. while (!operatorStack.empty() && operatorStack.top() != "(") { postfix.push(operatorStack.top()); operatorStack.pop(); } operatorStack.push(expression); } else if (expression == "(") { operatorStack.push(expression); } else if (expression == ")") { while (!operatorStack.empty() && operatorStack.top() != "(") { postfix.push(operatorStack.top()); operatorStack.pop(); } if (operatorStack.empty() || operatorStack.top() != "(") { return Option<bool>(400, "Could not parse the filter query: unbalanced parentheses."); } operatorStack.pop(); } else { postfix.push(expression); } } while (!operatorStack.empty()) { if (operatorStack.top() == "(") { return Option<bool>(400, "Could not parse the filter query: unbalanced parentheses."); } postfix.push(operatorStack.top()); operatorStack.pop(); } return Option<bool>(true); } Option<bool> toMultiValueNumericFilter(std::string& raw_value, filter& filter_exp, const field& _field) { std::vector<std::string> filter_values; StringUtils::split(raw_value.substr(1, raw_value.size() - 2), filter_values, ","); filter_exp = {_field.name, {}, {}}; for (std::string& filter_value: filter_values) { Option<NUM_COMPARATOR> op_comparator = filter::extract_num_comparator(filter_value); if (!op_comparator.ok()) { return Option<bool>(400, "Error with 
filter field `" + _field.name + "`: " + op_comparator.error()); } if (op_comparator.get() == RANGE_INCLUSIVE) { // split the value around range operator to extract bounds std::vector<std::string> range_values; StringUtils::split(filter_value, range_values, filter::RANGE_OPERATOR()); for (const std::string& range_value: range_values) { auto validate_op = filter::validate_numerical_filter_value(_field, range_value); if (!validate_op.ok()) { return validate_op; } filter_exp.values.push_back(range_value); filter_exp.comparators.push_back(op_comparator.get()); } } else { auto validate_op = filter::validate_numerical_filter_value(_field, filter_value); if (!validate_op.ok()) { return validate_op; } filter_exp.values.push_back(filter_value); filter_exp.comparators.push_back(op_comparator.get()); } } return Option<bool>(true); } Option<bool> toFilter(const std::string expression, filter& filter_exp, const tsl::htrie_map<char, field>& search_schema, const Store* store, const std::string& doc_id_prefix) { // split into [field_name, value] size_t found_index = expression.find(':'); if (found_index == std::string::npos) { return Option<bool>(400, "Could not parse the filter query."); } std::string&& field_name = expression.substr(0, found_index); StringUtils::trim(field_name); if (field_name == "id") { std::string&& raw_value = expression.substr(found_index + 1, std::string::npos); StringUtils::trim(raw_value); std::string empty_filter_err = "Error with filter field `id`: Filter value cannot be empty."; if (raw_value.empty()) { return Option<bool>(400, empty_filter_err); } filter_exp = {field_name, {}, {}}; NUM_COMPARATOR id_comparator = EQUALS; size_t filter_value_index = 0; if (raw_value[0] == '=') { id_comparator = EQUALS; while (++filter_value_index < raw_value.size() && raw_value[filter_value_index] == ' '); } else if (raw_value.size() >= 2 && raw_value[0] == '!' 
&& raw_value[1] == '=') { id_comparator = NOT_EQUALS; filter_exp.apply_not_equals = true; filter_value_index++; while (++filter_value_index < raw_value.size() && raw_value[filter_value_index] == ' '); } if (filter_value_index != 0) { raw_value = raw_value.substr(filter_value_index); } if (raw_value.empty()) { return Option<bool>(400, empty_filter_err); } if (raw_value[0] == '[' && raw_value[raw_value.size() - 1] == ']') { std::vector<std::string> doc_ids; StringUtils::split_to_values(raw_value.substr(1, raw_value.size() - 2), doc_ids); for (std::string& doc_id: doc_ids) { if (doc_id == "*") { filter_exp.values.clear(); filter_exp.values.emplace_back("*"); filter_exp.comparators.clear(); filter_exp.comparators.push_back(id_comparator); break; } // we have to convert the doc_id to seq id std::string seq_id_str; StoreStatus seq_id_status = store->get(doc_id_prefix + doc_id, seq_id_str); if (seq_id_status != StoreStatus::FOUND) { continue; } filter_exp.values.push_back(seq_id_str); filter_exp.comparators.push_back(id_comparator); } } else { std::vector<std::string> doc_ids; StringUtils::split_to_values(raw_value, doc_ids); // to handle backticks if (doc_ids.empty()) { return Option<bool>(400, empty_filter_err); } if (doc_ids[0] == "*") { filter_exp.values.emplace_back("*"); filter_exp.comparators.push_back(id_comparator); } else { std::string seq_id_str; StoreStatus seq_id_status = store->get(doc_id_prefix + doc_ids[0], seq_id_str); if (seq_id_status == StoreStatus::FOUND) { filter_exp.values.push_back(seq_id_str); filter_exp.comparators.push_back(id_comparator); } } } return Option<bool>(true); } auto field_it = search_schema.find(field_name); if (field_it == search_schema.end()) { return Option<bool>(404, "Could not find a filter field named `" + field_name + "` in the schema."); } if (field_it->num_dim > 0) { return Option<bool>(404, "Cannot filter on vector field `" + field_name + "`."); } const field& _field = field_it.value(); std::string&& raw_value = 
expression.substr(found_index + 1, std::string::npos); StringUtils::trim(raw_value); // skip past optional `:=` operator, which has no meaning for non-string fields if (!_field.is_string() && raw_value[0] == '=') { size_t filter_value_index = 0; while (raw_value[++filter_value_index] == ' '); raw_value = raw_value.substr(filter_value_index); } if (_field.is_integer() || _field.is_float()) { // could be a single value or a list if (raw_value[0] == '[' && raw_value[raw_value.size() - 1] == ']') { Option<bool> op = toMultiValueNumericFilter(raw_value, filter_exp, _field); if (!op.ok()) { return op; } } else { Option<NUM_COMPARATOR> op_comparator = filter::extract_num_comparator(raw_value); if (!op_comparator.ok()) { return Option<bool>(400, "Error with filter field `" + _field.name + "`: " + op_comparator.error()); } if (op_comparator.get() == RANGE_INCLUSIVE) { // split the value around range operator to extract bounds std::vector<std::string> range_values; StringUtils::split(raw_value, range_values, filter::RANGE_OPERATOR()); filter_exp.field_name = field_name; for (const std::string& range_value: range_values) { auto validate_op = filter::validate_numerical_filter_value(_field, range_value); if (!validate_op.ok()) { return validate_op; } filter_exp.values.push_back(range_value); filter_exp.comparators.push_back(op_comparator.get()); } } else if (op_comparator.get() == NOT_EQUALS && raw_value[0] == '[' && raw_value[raw_value.size() - 1] == ']') { Option<bool> op = toMultiValueNumericFilter(raw_value, filter_exp, _field); if (!op.ok()) { return op; } filter_exp.apply_not_equals = true; } else { auto validate_op = filter::validate_numerical_filter_value(_field, raw_value); if (!validate_op.ok()) { return validate_op; } filter_exp = {field_name, {raw_value}, {op_comparator.get()}}; } } } else if (_field.is_bool()) { NUM_COMPARATOR bool_comparator = EQUALS; size_t filter_value_index = 0; if (raw_value[0] == '=') { bool_comparator = EQUALS; while (++filter_value_index < 
raw_value.size() && raw_value[filter_value_index] == ' '); } else if (raw_value.size() >= 2 && raw_value[0] == '!' && raw_value[1] == '=') { bool_comparator = NOT_EQUALS; filter_value_index++; while (++filter_value_index < raw_value.size() && raw_value[filter_value_index] == ' '); } if (filter_value_index != 0) { raw_value = raw_value.substr(filter_value_index); } if (filter_value_index == raw_value.size()) { return Option<bool>(400, "Error with filter field `" + _field.name + "`: Filter value cannot be empty."); } if (raw_value[0] == '[' && raw_value[raw_value.size() - 1] == ']') { std::vector<std::string> filter_values; StringUtils::split(raw_value.substr(1, raw_value.size() - 2), filter_values, ","); filter_exp = {field_name, {}, {}}; for (std::string& filter_value: filter_values) { if (filter_value != "true" && filter_value != "false") { return Option<bool>(400, "Values of filter field `" + _field.name + "`: must be `true` or `false`."); } filter_value = (filter_value == "true") ? "1" : "0"; filter_exp.values.push_back(filter_value); filter_exp.comparators.push_back(bool_comparator); } } else { if (raw_value != "true" && raw_value != "false") { return Option<bool>(400, "Value of filter field `" + _field.name + "` must be `true` or `false`."); } std::string bool_value = (raw_value == "true") ? 
"1" : "0"; filter_exp = {field_name, {bool_value}, {bool_comparator}}; } } else if (_field.is_geopoint()) { filter_exp = {field_name, {}, {}}; NUM_COMPARATOR num_comparator; if ((raw_value[0] == '(' && std::count(raw_value.begin(), raw_value.end(), '[') > 0) || std::count(raw_value.begin(), raw_value.end(), '[') > 1 || std::count(raw_value.begin(), raw_value.end(), ':') > 0) { const std::string& format_err_msg = "Value of filter field `" + _field.name + "`: must be in the " "`([-44.50, 170.29], radius: 0.75 km, exact_filter_radius: 5 km)` or " "([56.33, -65.97, 23.82, -127.82], exact_filter_radius: 7 km) format."; auto parse_op = filter::parse_geopoint_filter_value(raw_value, format_err_msg, filter_exp); return parse_op; } const std::string& format_err_msg = "Value of filter field `" + _field.name + "`: must be in the `(-44.50, 170.29, 0.75 km)` or " "(56.33, -65.97, 23.82, -127.82) format."; // could be a single value or a list if (raw_value[0] == '[' && raw_value[raw_value.size() - 1] == ']') { std::vector<std::string> filter_values; StringUtils::split(raw_value.substr(1, raw_value.size() - 2), filter_values, "),"); for (std::string& filter_value: filter_values) { filter_value += ")"; std::string processed_filter_val; auto parse_op = filter::parse_geopoint_filter_value(filter_value, format_err_msg, processed_filter_val, num_comparator); if (!parse_op.ok()) { return parse_op; } filter_exp.values.push_back(processed_filter_val); filter_exp.comparators.push_back(num_comparator); } } else { // single value, e.g. 
(10.45, 34.56, 2 km) std::string processed_filter_val; auto parse_op = filter::parse_geopoint_filter_value(raw_value, format_err_msg, processed_filter_val, num_comparator); if (!parse_op.ok()) { return parse_op; } filter_exp.values.push_back(processed_filter_val); filter_exp.comparators.push_back(num_comparator); } } else if (_field.is_string()) { size_t filter_value_index = 0; NUM_COMPARATOR str_comparator = CONTAINS; auto apply_not_equals = false; if (raw_value[0] == '=') { // string filter should be evaluated in strict "equals" mode str_comparator = EQUALS; while (++filter_value_index < raw_value.size() && raw_value[filter_value_index] == ' '); } else if (raw_value.size() >= 2 && raw_value[0] == '!') { if (raw_value[1] == '=') { str_comparator = NOT_EQUALS; filter_value_index++; } apply_not_equals = true; while (++filter_value_index < raw_value.size() && raw_value[filter_value_index] == ' '); } if (filter_value_index == raw_value.size()) { return Option<bool>(400, "Error with filter field `" + _field.name + "`: Filter value cannot be empty."); } if (raw_value[filter_value_index] == '[' && raw_value[raw_value.size() - 1] == ']') { std::vector<std::string> filter_values; StringUtils::split_to_values( raw_value.substr(filter_value_index + 1, raw_value.size() - filter_value_index - 2), filter_values); if(_field.stem) { auto stemmer = _field.get_stemmer(); for (std::string& filter_value: filter_values) { filter_value = stemmer->stem(filter_value); } } filter_exp = {field_name, filter_values, {str_comparator}}; } else { std::string filter_value = raw_value.substr(filter_value_index); if(_field.stem) { auto stemmer = _field.get_stemmer(); filter_value = stemmer->stem(filter_value); } filter_exp = {field_name, {filter_value}, {str_comparator}}; } filter_exp.apply_not_equals = apply_not_equals; } else { return Option<bool>(400, "Error with filter field `" + _field.name + "`: Unidentified field data type, see docs for supported data types."); } return Option<bool>(true); 
} // https://stackoverflow.com/a/423914/11218270 Option<bool> toParseTree(std::queue<std::string>& postfix, filter_node_t*& root, const tsl::htrie_map<char, field>& search_schema, const Store* store, const std::string& doc_id_prefix) { std::stack<filter_node_t*> nodeStack; bool is_successful = true; std::string error_message; filter_node_t *filter_node = nullptr; while (!postfix.empty()) { const std::string expression = postfix.front(); postfix.pop(); if (isOperator(expression)) { if (nodeStack.empty()) { is_successful = false; error_message = "Could not parse the filter query: unbalanced `" + expression + "` operands."; break; } auto operandB = nodeStack.top(); nodeStack.pop(); if (nodeStack.empty()) { delete operandB; is_successful = false; error_message = "Could not parse the filter query: unbalanced `" + expression + "` operands."; break; } auto operandA = nodeStack.top(); nodeStack.pop(); filter_node = new filter_node_t(expression == "&&" ? AND : OR, operandA, operandB); filter_node->filter_query = operandA->filter_query + " " + expression + " " + operandB->filter_query; } else { filter filter_exp; // Expected value: $Collection(...) 
bool is_referenced_filter = (expression[0] == '$' && expression[expression.size() - 1] == ')'); if (is_referenced_filter) { size_t parenthesis_index = expression.find('('); std::string collection_name = expression.substr(1, parenthesis_index - 1); auto &cm = CollectionManager::get_instance(); auto collection = cm.get_collection(collection_name); if (collection == nullptr) { is_successful = false; error_message = "Referenced collection `" + collection_name + "` not found."; break; } filter_exp = {expression.substr(parenthesis_index + 1, expression.size() - parenthesis_index - 2)}; filter_exp.referenced_collection_name = collection_name; } else { Option<bool> toFilter_op = toFilter(expression, filter_exp, search_schema, store, doc_id_prefix); if (!toFilter_op.ok()) { is_successful = false; error_message = toFilter_op.error(); break; } } filter_node = new filter_node_t(filter_exp); filter_node->filter_query = expression; } nodeStack.push(filter_node); } if (!is_successful) { while (!nodeStack.empty()) { auto filterNode = nodeStack.top(); delete filterNode; nodeStack.pop(); } return Option<bool>(400, error_message); } if (nodeStack.empty()) { return Option<bool>(400, "Filter query cannot be empty."); } root = nodeStack.top(); return Option<bool>(true); } Option<bool> filter::parse_filter_query(const std::string& filter_query, const tsl::htrie_map<char, field>& search_schema, const Store* store, const std::string& doc_id_prefix, filter_node_t*& root) { auto _filter_query = filter_query; StringUtils::trim(_filter_query); if (_filter_query.empty()) { return Option<bool>(true); } std::queue<std::string> tokens; Option<bool> tokenize_op = StringUtils::tokenize_filter_query(filter_query, tokens); if (!tokenize_op.ok()) { return tokenize_op; } std::queue<std::string> postfix; Option<bool> toPostfix_op = toPostfix(tokens, postfix); if (!toPostfix_op.ok()) { return toPostfix_op; } auto const& max_ops = CollectionManager::get_instance().filter_by_max_ops; if (postfix.size() > 
max_ops) { return Option<bool>(400, "`filter_by` has too many operations. Maximum allowed: " + std::to_string(max_ops) + ". Use `--filter-by-max-ops` command line argument to customize this value."); } Option<bool> toParseTree_op = toParseTree(postfix, root, search_schema, store, doc_id_prefix); if (!toParseTree_op.ok()) { return toParseTree_op; } root->filter_query = filter_query; return Option<bool>(true); }
32,971
C++
.cpp
690
35.550725
124
0.534822
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,806
numeric_range_trie.cpp
typesense_typesense/src/numeric_range_trie.cpp
#include <timsort.hpp> #include <set> #include "numeric_range_trie.h" #include "array_utils.h" void NumericTrie::insert(const int64_t& value, const uint32_t& seq_id) { if (value < 0) { if (negative_trie == nullptr) { negative_trie = new NumericTrie::Node(); } negative_trie->insert(std::abs(value), seq_id, max_level); } else { if (positive_trie == nullptr) { positive_trie = new NumericTrie::Node(); } positive_trie->insert(value, seq_id, max_level); } } void NumericTrie::remove(const int64_t& value, const uint32_t& seq_id) { if ((value < 0 && negative_trie == nullptr) || (value >= 0 && positive_trie == nullptr)) { return; } if (value < 0) { negative_trie->remove(std::abs(value), seq_id, max_level); } else { positive_trie->remove(value, seq_id, max_level); } } void NumericTrie::insert_geopoint(const uint64_t& cell_id, const uint32_t& seq_id) { if (positive_trie == nullptr) { positive_trie = new NumericTrie::Node(); } positive_trie->insert_geopoint(cell_id, seq_id, max_level); } void NumericTrie::search_geopoints(const std::vector<uint64_t>& cell_ids, std::vector<uint32_t>& geo_result_ids) { if (positive_trie == nullptr) { return; } positive_trie->search_geopoints(cell_ids, max_level, geo_result_ids); } void NumericTrie::delete_geopoint(const uint64_t& cell_id, uint32_t id) { if (positive_trie == nullptr) { return; } positive_trie->delete_geopoint(cell_id, id, max_level); } void NumericTrie::search_range(const int64_t& low, const bool& low_inclusive, const int64_t& high, const bool& high_inclusive, uint32_t*& ids, uint32_t& ids_length) { if (low > high) { return; } if (low < 0 && high >= 0) { // Have to combine the results of >low from negative_trie and <high from positive_trie if (negative_trie != nullptr && !(low == -1 && !low_inclusive)) { // No need to search for (-1, ... uint32_t* negative_ids = nullptr; uint32_t negative_ids_length = 0; auto abs_low = std::abs(low); // Since we store absolute values, search_lesser would yield result for >low from negative_trie. 
negative_trie->search_less_than(low_inclusive ? abs_low : abs_low - 1, max_level, negative_ids, negative_ids_length); uint32_t* out = nullptr; ids_length = ArrayUtils::or_scalar(negative_ids, negative_ids_length, ids, ids_length, &out); delete [] negative_ids; delete [] ids; ids = out; } if (positive_trie != nullptr && !(high == 0 && !high_inclusive)) { // No need to search for ..., 0) uint32_t* positive_ids = nullptr; uint32_t positive_ids_length = 0; positive_trie->search_less_than(high_inclusive ? high : high - 1, max_level, positive_ids, positive_ids_length); uint32_t* out = nullptr; ids_length = ArrayUtils::or_scalar(positive_ids, positive_ids_length, ids, ids_length, &out); delete [] positive_ids; delete [] ids; ids = out; } } else if (low >= 0) { // Search only in positive_trie if (positive_trie == nullptr) { return; } uint32_t* positive_ids = nullptr; uint32_t positive_ids_length = 0; positive_trie->search_range(low_inclusive ? low : low + 1, high_inclusive ? high : high - 1, max_level, positive_ids, positive_ids_length); uint32_t* out = nullptr; ids_length = ArrayUtils::or_scalar(positive_ids, positive_ids_length, ids, ids_length, &out); delete [] positive_ids; delete [] ids; ids = out; } else { // Search only in negative_trie if (negative_trie == nullptr) { return; } uint32_t* negative_ids = nullptr; uint32_t negative_ids_length = 0; // Since we store absolute values, switching low and high would produce the correct result. auto abs_high = std::abs(high), abs_low = std::abs(low); negative_trie->search_range(high_inclusive ? abs_high : abs_high + 1, low_inclusive ? 
abs_low : abs_low - 1, max_level, negative_ids, negative_ids_length); uint32_t* out = nullptr; ids_length = ArrayUtils::or_scalar(negative_ids, negative_ids_length, ids, ids_length, &out); delete [] negative_ids; delete [] ids; ids = out; } } NumericTrie::iterator_t NumericTrie::search_range(const int64_t& low, const bool& low_inclusive, const int64_t& high, const bool& high_inclusive) { std::vector<Node*> matches; if (low > high) { return NumericTrie::iterator_t(matches); } if (low < 0 && high >= 0) { // Have to combine the results of >low from negative_trie and <high from positive_trie if (negative_trie != nullptr && !(low == -1 && !low_inclusive)) { // No need to search for (-1, ... auto abs_low = std::abs(low); // Since we store absolute values, search_lesser would yield result for >low from negative_trie. negative_trie->search_less_than(low_inclusive ? abs_low : abs_low - 1, max_level, matches); } if (positive_trie != nullptr && !(high == 0 && !high_inclusive)) { // No need to search for ..., 0) positive_trie->search_less_than(high_inclusive ? high : high - 1, max_level, matches); } } else if (low >= 0) { // Search only in positive_trie if (positive_trie == nullptr) { return NumericTrie::iterator_t(matches); } positive_trie->search_range(low_inclusive ? low : low + 1, high_inclusive ? high : high - 1, max_level, matches); } else { // Search only in negative_trie if (negative_trie == nullptr) { return NumericTrie::iterator_t(matches); } auto abs_high = std::abs(high), abs_low = std::abs(low); // Since we store absolute values, switching low and high would produce the correct result. negative_trie->search_range(high_inclusive ? abs_high : abs_high + 1, low_inclusive ? 
abs_low : abs_low - 1, max_level, matches); } return NumericTrie::iterator_t(matches); } void NumericTrie::search_greater_than(const int64_t& value, const bool& inclusive, uint32_t*& ids, uint32_t& ids_length) { if ((value == 0 && inclusive) || (value == -1 && !inclusive)) { // [0, ∞), (-1, ∞) if (positive_trie != nullptr) { uint32_t* positive_ids = nullptr; uint32_t positive_ids_length = 0; positive_trie->get_all_ids(positive_ids, positive_ids_length); uint32_t* out = nullptr; ids_length = ArrayUtils::or_scalar(positive_ids, positive_ids_length, ids, ids_length, &out); delete [] positive_ids; delete [] ids; ids = out; } return; } if (value >= 0) { if (positive_trie == nullptr) { return; } uint32_t* positive_ids = nullptr; uint32_t positive_ids_length = 0; positive_trie->search_greater_than(inclusive ? value : value + 1, max_level, positive_ids, positive_ids_length); uint32_t* out = nullptr; ids_length = ArrayUtils::or_scalar(positive_ids, positive_ids_length, ids, ids_length, &out); delete [] positive_ids; delete [] ids; ids = out; } else { // Have to combine the results of >value from negative_trie and all the ids in positive_trie if (negative_trie != nullptr) { uint32_t* negative_ids = nullptr; uint32_t negative_ids_length = 0; auto abs_low = std::abs(value); // Since we store absolute values, search_lesser would yield result for >value from negative_trie. negative_trie->search_less_than(inclusive ? 
abs_low : abs_low - 1, max_level, negative_ids, negative_ids_length); uint32_t* out = nullptr; ids_length = ArrayUtils::or_scalar(negative_ids, negative_ids_length, ids, ids_length, &out); delete [] negative_ids; delete [] ids; ids = out; } if (positive_trie == nullptr) { return; } uint32_t* positive_ids = nullptr; uint32_t positive_ids_length = 0; positive_trie->get_all_ids(positive_ids, positive_ids_length); uint32_t* out = nullptr; ids_length = ArrayUtils::or_scalar(positive_ids, positive_ids_length, ids, ids_length, &out); delete [] positive_ids; delete [] ids; ids = out; } } NumericTrie::iterator_t NumericTrie::search_greater_than(const int64_t& value, const bool& inclusive) { std::vector<Node*> matches; if ((value == 0 && inclusive) || (value == -1 && !inclusive)) { // [0, ∞), (-1, ∞) if (positive_trie != nullptr) { matches.push_back(positive_trie); } return NumericTrie::iterator_t(matches); } if (value >= 0) { if (positive_trie != nullptr) { positive_trie->search_greater_than(inclusive ? value : value + 1, max_level, matches); } } else { // Have to combine the results of >value from negative_trie and all the ids in positive_trie if (negative_trie != nullptr) { auto abs_low = std::abs(value); // Since we store absolute values, search_lesser would yield result for >value from negative_trie. negative_trie->search_less_than(inclusive ? 
abs_low : abs_low - 1, max_level, matches); } if (positive_trie != nullptr) { matches.push_back(positive_trie); } } return NumericTrie::iterator_t(matches); } void NumericTrie::search_less_than(const int64_t& value, const bool& inclusive, uint32_t*& ids, uint32_t& ids_length) { if ((value == 0 && !inclusive) || (value == -1 && inclusive)) { // (-∞, 0), (-∞, -1] if (negative_trie != nullptr) { uint32_t* negative_ids = nullptr; uint32_t negative_ids_length = 0; negative_trie->get_all_ids(negative_ids, negative_ids_length); uint32_t* out = nullptr; ids_length = ArrayUtils::or_scalar(negative_ids, negative_ids_length, ids, ids_length, &out); delete [] negative_ids; delete [] ids; ids = out; } return; } if (value < 0) { if (negative_trie == nullptr) { return; } uint32_t* negative_ids = nullptr; uint32_t negative_ids_length = 0; auto abs_low = std::abs(value); // Since we store absolute values, search_greater would yield result for <value from negative_trie. negative_trie->search_greater_than(inclusive ? abs_low : abs_low + 1, max_level, negative_ids, negative_ids_length); uint32_t* out = nullptr; ids_length = ArrayUtils::or_scalar(negative_ids, negative_ids_length, ids, ids_length, &out); delete [] negative_ids; delete [] ids; ids = out; } else { // Have to combine the results of <value from positive_trie and all the ids in negative_trie if (positive_trie != nullptr) { uint32_t* positive_ids = nullptr; uint32_t positive_ids_length = 0; positive_trie->search_less_than(inclusive ? 
value : value - 1, max_level, positive_ids, positive_ids_length); uint32_t* out = nullptr; ids_length = ArrayUtils::or_scalar(positive_ids, positive_ids_length, ids, ids_length, &out); delete [] positive_ids; delete [] ids; ids = out; } if (negative_trie == nullptr) { return; } uint32_t* negative_ids = nullptr; uint32_t negative_ids_length = 0; negative_trie->get_all_ids(negative_ids, negative_ids_length); uint32_t* out = nullptr; ids_length = ArrayUtils::or_scalar(negative_ids, negative_ids_length, ids, ids_length, &out); delete [] negative_ids; delete [] ids; ids = out; } } NumericTrie::iterator_t NumericTrie::search_less_than(const int64_t& value, const bool& inclusive) { std::vector<Node*> matches; if ((value == 0 && !inclusive) || (value == -1 && inclusive)) { // (-∞, 0), (-∞, -1] if (negative_trie != nullptr) { matches.push_back(negative_trie); } return NumericTrie::iterator_t(matches); } if (value < 0) { if (negative_trie != nullptr) { auto abs_low = std::abs(value); // Since we store absolute values, search_greater would yield result for <value from negative_trie. negative_trie->search_greater_than(inclusive ? abs_low : abs_low + 1, max_level, matches); } } else { // Have to combine the results of <value from positive_trie and all the ids in negative_trie if (positive_trie != nullptr) { positive_trie->search_less_than(inclusive ? 
value : value - 1, max_level, matches); } if (negative_trie != nullptr) { matches.push_back(negative_trie); } } return NumericTrie::iterator_t(matches); } void NumericTrie::search_equal_to(const int64_t& value, uint32_t*& ids, uint32_t& ids_length) { if ((value < 0 && negative_trie == nullptr) || (value >= 0 && positive_trie == nullptr)) { return; } uint32_t* equal_ids = nullptr; uint32_t equal_ids_length = 0; if (value < 0) { negative_trie->search_equal_to(std::abs(value), max_level, equal_ids, equal_ids_length); } else { positive_trie->search_equal_to(value, max_level, equal_ids, equal_ids_length); } uint32_t* out = nullptr; ids_length = ArrayUtils::or_scalar(equal_ids, equal_ids_length, ids, ids_length, &out); delete [] equal_ids; delete [] ids; ids = out; } NumericTrie::iterator_t NumericTrie::search_equal_to(const int64_t& value) { std::vector<Node*> matches; if (value < 0 && negative_trie != nullptr) { negative_trie->search_equal_to(std::abs(value), max_level, matches); } else if (value >= 0 && positive_trie != nullptr) { positive_trie->search_equal_to(value, max_level, matches); } return NumericTrie::iterator_t(matches); } void NumericTrie::seq_ids_outside_top_k(const size_t& k, std::vector<uint32_t>& result) { size_t ids_skipped = 0; if (negative_trie != nullptr && positive_trie != nullptr) { positive_trie->seq_ids_outside_top_k(k, max_level, ids_skipped, result); if (ids_skipped < k) { // Haven't skipped k ids yet, would need to skip ids in negative trie also. 
negative_trie->seq_ids_outside_top_k(k, max_level, ids_skipped, result, true); return; } negative_trie->get_all_ids(result); } else if (negative_trie != nullptr) { negative_trie->seq_ids_outside_top_k(k, max_level, ids_skipped, result, true); } else { positive_trie->seq_ids_outside_top_k(k, max_level, ids_skipped, result); } } size_t NumericTrie::size() { size_t size = 0; if (negative_trie != nullptr) { size += negative_trie->get_ids_length(); } if (positive_trie != nullptr) { size += positive_trie->get_ids_length(); } return size; } inline int64_t indexable_limit(const char& max_level) { switch (max_level) { case 1: return 0xFF; case 2: return 0xFFFF; case 3: return 0xFFFFFF; case 4: return 0xFFFFFFFF; case 5: return 0xFFFFFFFFFF; case 6: return 0xFFFFFFFFFFFF; case 7: return 0xFFFFFFFFFFFFFF; case 8: return 0x7FFFFFFFFFFFFFFF; default: return 0; } } void NumericTrie::Node::insert(const int64_t& value, const uint32_t& seq_id, const char& max_level) { if (value > indexable_limit(max_level)) { return; } char level = 0; return insert_helper(value, seq_id, level, max_level); } void NumericTrie::Node::insert_geopoint(const uint64_t& cell_id, const uint32_t& seq_id, const char& max_level) { char level = 0; return insert_geopoint_helper(cell_id, seq_id, level, max_level); } inline short get_index(const int64_t& value, const char& level, const char& max_level) { // Values are index considering higher order of the bytes first. // 0x01020408 (16909320) would be indexed in the trie as follows: // Level Index // 1 1 // 2 2 // 3 4 // 4 8 return (value >> (8 * (max_level - level))) & 0xFF; } inline short get_geopoint_index(const uint64_t& cell_id, const char& level) { // Doing 8-level since cell_id is a 64 bit number. 
return (cell_id >> (8 * (8 - level))) & 0xFF; } void NumericTrie::Node::remove(const int64_t& value, const uint32_t& id, const char& max_level) { if (value > indexable_limit(max_level)) { return; } char level = 1; Node* root = this; auto index = get_index(value, level, max_level); while (level < max_level) { ids_t::erase(root->seq_ids, id); if (root->children == nullptr || root->children[index] == nullptr) { return; } root = root->children[index]; index = get_index(value, ++level, max_level); } ids_t::erase(root->seq_ids, id); if (root->children != nullptr && root->children[index] != nullptr) { auto& child = root->children[index]; ids_t::erase(child->seq_ids, id); if (ids_t::num_ids(child->seq_ids) == 0) { delete child; child = nullptr; } } } void NumericTrie::Node::insert_helper(const int64_t& value, const uint32_t& seq_id, char& level, const char& max_level) { if (level > max_level) { return; } // Root node contains all the sequence ids present in the tree. ids_t::upsert(seq_ids, seq_id); if (++level <= max_level) { if (children == nullptr) { children = new NumericTrie::Node* [EXPANSE]{nullptr}; } auto index = get_index(value, level, max_level); if (children[index] == nullptr) { children[index] = new NumericTrie::Node(); } return children[index]->insert_helper(value, seq_id, level, max_level); } } void NumericTrie::Node::insert_geopoint_helper(const uint64_t& cell_id, const uint32_t& seq_id, char& level, const char& max_level) { if (level > max_level) { return; } // Root node contains all the sequence ids present in the tree. 
ids_t::upsert(seq_ids, seq_id); if (++level <= max_level) { if (children == nullptr) { children = new NumericTrie::Node* [EXPANSE]{nullptr}; } auto index = get_geopoint_index(cell_id, level); if (children[index] == nullptr) { children[index] = new NumericTrie::Node(); } return children[index]->insert_geopoint_helper(cell_id, seq_id, level, max_level); } } char get_max_search_level(const uint64_t& cell_id, const char& max_level) { // For cell id 0x47E66C3000000000, we only have to prefix match the top four bytes since rest of the bytes are 0. // So the max search level would be 4 in this case. auto mask = (uint64_t) 0xFF << (8 * (8 - max_level)); // We're only indexing top 8-max_level bytes. char i = max_level; while (((cell_id & mask) == 0) && --i > 0) { mask <<= 8; } return i; } void NumericTrie::Node::search_geopoints_helper(const uint64_t& cell_id, const char& max_index_level, std::set<Node*>& matches) { char level = 1; Node* root = this; auto index = get_geopoint_index(cell_id, level); auto max_search_level = get_max_search_level(cell_id, max_index_level); while (level < max_search_level) { if (root->children == nullptr || root->children[index] == nullptr) { return; } root = root->children[index]; index = get_geopoint_index(cell_id, ++level); } matches.insert(root); } void NumericTrie::Node::search_geopoints(const std::vector<uint64_t>& cell_ids, const char& max_level, std::vector<uint32_t>& geo_result_ids) { std::set<Node*> matches; for (const auto &cell_id: cell_ids) { search_geopoints_helper(cell_id, max_level, matches); } for (auto const& match: matches) { ids_t::uncompress(match->seq_ids, geo_result_ids); } gfx::timsort(geo_result_ids.begin(), geo_result_ids.end()); geo_result_ids.erase(unique(geo_result_ids.begin(), geo_result_ids.end()), geo_result_ids.end()); } void NumericTrie::Node::delete_geopoint(const uint64_t& cell_id, uint32_t id, const char& max_level) { char level = 1; Node* root = this; auto index = get_geopoint_index(cell_id, level); while 
(level < max_level) { ids_t::erase(root->seq_ids, id); if (root->children == nullptr || root->children[index] == nullptr) { return; } root = root->children[index]; index = get_geopoint_index(cell_id, ++level); } ids_t::erase(root->seq_ids, id); if (root->children != nullptr && root->children[index] != nullptr) { auto& child = root->children[index]; ids_t::erase(child->seq_ids, id); if (ids_t::num_ids(child->seq_ids) == 0) { delete child; child = nullptr; } } } void NumericTrie::Node::get_all_ids(uint32_t*& ids, uint32_t& ids_length) { ids = ids_t::uncompress(seq_ids); ids_length = ids_t::num_ids(seq_ids); } void NumericTrie::Node::search_less_than(const int64_t& value, const char& max_level, uint32_t*& ids, uint32_t& ids_length) { if (value >= indexable_limit(max_level)) { get_all_ids(ids, ids_length); return; } char level = 0; std::vector<NumericTrie::Node*> matches; search_less_than_helper(value, level, max_level, matches); std::vector<uint32_t> consolidated_ids; for (auto const& match: matches) { ids_t::uncompress(match->seq_ids, consolidated_ids); } gfx::timsort(consolidated_ids.begin(), consolidated_ids.end()); consolidated_ids.erase(unique(consolidated_ids.begin(), consolidated_ids.end()), consolidated_ids.end()); uint32_t* out = nullptr; ids_length = ArrayUtils::or_scalar(&consolidated_ids[0], consolidated_ids.size(), ids, ids_length, &out); delete [] ids; ids = out; } void NumericTrie::Node::search_less_than(const int64_t& value, const char& max_level, std::vector<Node*>& matches) { char level = 0; search_less_than_helper(value, level, max_level, matches); } void NumericTrie::Node::search_less_than_helper(const int64_t& value, char& level, const char& max_level, std::vector<Node*>& matches) { if (level == max_level) { matches.push_back(this); return; } else if (level > max_level || children == nullptr) { return; } auto index = get_index(value, ++level, max_level); if (children[index] != nullptr) { children[index]->search_less_than_helper(value, level, 
max_level, matches); } while (--index >= 0) { if (children[index] != nullptr) { matches.push_back(children[index]); } } --level; } void NumericTrie::Node::search_range(const int64_t& low, const int64_t& high, const char& max_level, uint32_t*& ids, uint32_t& ids_length) { if (low > high) { return; } std::vector<NumericTrie::Node*> matches; search_range_helper(low, high >= indexable_limit(max_level) ? indexable_limit(max_level) : high, max_level, matches); std::vector<uint32_t> consolidated_ids; for (auto const& match: matches) { ids_t::uncompress(match->seq_ids, consolidated_ids); } gfx::timsort(consolidated_ids.begin(), consolidated_ids.end()); consolidated_ids.erase(unique(consolidated_ids.begin(), consolidated_ids.end()), consolidated_ids.end()); uint32_t* out = nullptr; ids_length = ArrayUtils::or_scalar(&consolidated_ids[0], consolidated_ids.size(), ids, ids_length, &out); delete [] ids; ids = out; } void NumericTrie::Node::search_range(const int64_t& low, const int64_t& high, const char& max_level, std::vector<Node*>& matches) { if (low > high) { return; } search_range_helper(low, high, max_level, matches); } void NumericTrie::Node::search_range_helper(const int64_t& low,const int64_t& high, const char& max_level, std::vector<Node*>& matches) { // Segregating the nodes into matching low, in-between, and matching high. NumericTrie::Node* root = this; char level = 1; auto low_index = get_index(low, level, max_level), high_index = get_index(high, level, max_level); // Keep updating the root while the range is contained within a single child node. 
while (root->children != nullptr && low_index == high_index && level < max_level) { if (root->children[low_index] == nullptr) { return; } root = root->children[low_index]; level++; low_index = get_index(low, level, max_level); high_index = get_index(high, level, max_level); } if (root->children == nullptr) { return; } else if (low_index == high_index) { // low and high are equal if (root->children[low_index] != nullptr) { matches.push_back(root->children[low_index]); } return; } if (root->children[low_index] != nullptr) { // Collect all the sub-nodes that are greater than low. root->children[low_index]->search_greater_than_helper(low, level, max_level, matches); } auto index = low_index + 1; // All the nodes in-between low and high are a match by default. while (index < std::min(high_index, EXPANSE)) { if (root->children[index] != nullptr) { matches.push_back(root->children[index]); } index++; } if (index < EXPANSE && index == high_index && root->children[index] != nullptr) { // Collect all the sub-nodes that are lesser than high. 
root->children[index]->search_less_than_helper(high, level, max_level, matches); } } void NumericTrie::Node::search_greater_than(const int64_t& value, const char& max_level, uint32_t*& ids, uint32_t& ids_length) { if (value >= indexable_limit(max_level)) { return; } char level = 0; std::vector<NumericTrie::Node*> matches; search_greater_than_helper(value, level, max_level, matches); std::vector<uint32_t> consolidated_ids; for (auto const& match: matches) { ids_t::uncompress(match->seq_ids, consolidated_ids); } gfx::timsort(consolidated_ids.begin(), consolidated_ids.end()); consolidated_ids.erase(unique(consolidated_ids.begin(), consolidated_ids.end()), consolidated_ids.end()); uint32_t* out = nullptr; ids_length = ArrayUtils::or_scalar(&consolidated_ids[0], consolidated_ids.size(), ids, ids_length, &out); delete [] ids; ids = out; } void NumericTrie::Node::search_greater_than(const int64_t& value, const char& max_level, std::vector<Node*>& matches) { char level = 0; search_greater_than_helper(value, level, max_level, matches); } void NumericTrie::Node::search_greater_than_helper(const int64_t& value, char& level, const char& max_level, std::vector<Node*>& matches) { if (level == max_level) { matches.push_back(this); return; } else if (level > max_level || children == nullptr) { return; } auto index = get_index(value, ++level, max_level); if (children[index] != nullptr) { children[index]->search_greater_than_helper(value, level, max_level, matches); } while (++index < EXPANSE) { if (children[index] != nullptr) { matches.push_back(children[index]); } } --level; } void NumericTrie::Node::search_equal_to(const int64_t& value, const char& max_level, uint32_t*& ids, uint32_t& ids_length) { if (value > indexable_limit(max_level)) { return; } char level = 1; Node* root = this; auto index = get_index(value, level, max_level); while (level <= max_level) { if (root->children == nullptr || root->children[index] == nullptr) { return; } root = root->children[index]; index = 
get_index(value, ++level, max_level); } root->get_all_ids(ids, ids_length); } void NumericTrie::Node::search_equal_to(const int64_t& value, const char& max_level, std::vector<Node*>& matches) { char level = 1; Node* root = this; auto index = get_index(value, level, max_level); while (level <= max_level) { if (root->children == nullptr || root->children[index] == nullptr) { return; } root = root->children[index]; index = get_index(value, ++level, max_level); } matches.push_back(root); } uint32_t NumericTrie::Node::get_ids_length() { return ids_t::num_ids(seq_ids); } void NumericTrie::Node::seq_ids_outside_top_k(const size_t& k, const char& max_level, size_t& ids_skipped, std::vector<uint32_t>& result, const bool& is_negative) { char level = 0; seq_ids_outside_top_k_helper(k, ids_skipped, level, max_level, is_negative, result); } void NumericTrie::Node::seq_ids_outside_top_k_helper(const size_t& k, size_t& ids_skipped, char& level, const char& max_level, const bool& is_negative, std::vector<uint32_t>& result) { if (level == max_level) { std::vector<uint32_t> ids; get_all_ids(ids); for(size_t i = 0; i < ids.size(); i++) { if(ids_skipped + i >= k) { result.push_back(ids[i]); } } ids_skipped += ids.size(); return; } else if (level > max_level) { return; } if (children == nullptr) { return; } short index = is_negative ? 0 : EXPANSE - 1; // Since we need to grab ids in descending order of their values. do { if (children[index] == nullptr) { continue; } if (ids_skipped + children[index]->get_ids_length() > k) { break; } ids_skipped += children[index]->get_ids_length(); } while(is_negative ? (++index < EXPANSE) : (--index >= 0)); if (is_negative ? (index >= EXPANSE) : (index < 0)) { return; } children[index]->seq_ids_outside_top_k_helper(k, ids_skipped, ++level, max_level, is_negative, result); --level; while (is_negative ? 
(++index < EXPANSE) : (--index >= 0)) { if (children[index] == nullptr) { continue; } children[index]->get_all_ids(result); } } void NumericTrie::Node::get_all_ids(std::vector<uint32_t>& result) { ids_t::uncompress(seq_ids, result); } void NumericTrie::iterator_t::reset() { for (auto& match: matches) { match->index = 0; } is_valid = true; set_seq_id(); } void NumericTrie::iterator_t::skip_to(uint32_t id) { for (auto& match: matches) { ArrayUtils::skip_index_to_id(match->index, match->ids, match->ids_length, id); } set_seq_id(); } void NumericTrie::iterator_t::next() { // Advance all the matches at seq_id. for (auto& match: matches) { if (match->index < match->ids_length && match->ids[match->index] == seq_id) { match->index++; } } set_seq_id(); } NumericTrie::iterator_t::iterator_t(std::vector<Node*>& node_matches) { for (auto const& node_match: node_matches) { uint32_t* ids = nullptr; uint32_t ids_length; node_match->get_all_ids(ids, ids_length); if (ids_length > 0) { matches.emplace_back(new match_state(ids, ids_length)); } } set_seq_id(); } void NumericTrie::iterator_t::set_seq_id() { // Find the lowest id of all the matches and update the seq_id. bool one_is_valid = false; uint32_t lowest_id = UINT32_MAX; for (auto& match: matches) { if (match->index < match->ids_length) { one_is_valid = true; if (match->ids[match->index] < lowest_id) { lowest_id = match->ids[match->index]; } } } if (one_is_valid) { seq_id = lowest_id; } is_valid = one_is_valid; } NumericTrie::iterator_t& NumericTrie::iterator_t::operator=(NumericTrie::iterator_t&& obj) noexcept { if (&obj == this) return *this; for (auto& match: matches) { delete match; } matches.clear(); matches = std::move(obj.matches); seq_id = obj.seq_id; is_valid = obj.is_valid; return *this; }
34,261
C++
.cpp
823
33.003645
122
0.584919
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,807
text_embedder.cpp
typesense_typesense/src/text_embedder.cpp
#include "text_embedder.h" #include "embedder_manager.h" #include "logger.h" #include <string> #include <fstream> #include <sstream> #include <filesystem> #include <dlfcn.h> TextEmbedder::TextEmbedder(const std::string& model_name) { // create environment for local model Ort::SessionOptions session_options; auto providers = Ort::GetAvailableProviders(); for(auto& provider : providers) { if(provider == "CUDAExecutionProvider") { // check existence of shared lib void* handle = dlopen("libonnxruntime_providers_shared.so", RTLD_NOW | RTLD_GLOBAL); if(!handle) { LOG(INFO) << "ONNX shared libs: off"; // log error continue; } dlclose(handle); OrtCUDAProviderOptions cuda_options; session_options.AppendExecutionProvider_CUDA(cuda_options); } } std::string abs_path = EmbedderManager::get_absolute_model_path(model_name); session_options.EnableOrtCustomOps(); LOG(INFO) << "Loading model from disk: " << abs_path; env_ = std::make_shared<Ort::Env>(); session_ = std::make_shared<Ort::Session>(*env_, abs_path.c_str(), session_options); std::ifstream config_file(EmbedderManager::get_absolute_config_path(model_name)); nlohmann::json config; config_file >> config; TokenizerType tokenizer_type = EmbedderManager::get_tokenizer_type(config); auto vocab_path = EmbedderManager::get_absolute_vocab_path(model_name, config["vocab_file_name"].get<std::string>()); if(tokenizer_type == TokenizerType::bert) { tokenizer_ = std::make_unique<BertTokenizerWrapper>(vocab_path); } else if(tokenizer_type == TokenizerType::distilbert) { tokenizer_ = std::make_unique<DistilbertTokenizer>(vocab_path); } else if(tokenizer_type == TokenizerType::xlm_roberta) { tokenizer_ = std::make_unique<XLMRobertaTokenizer>(vocab_path); } else if(tokenizer_type == TokenizerType::clip) { tokenizer_ = std::make_unique<CLIPTokenizerWrapper>(vocab_path); output_tensor_name = "text_embeds"; num_dim = 512; return; } auto output_tensor_count = session_->GetOutputCount(); for (size_t i = 0; i < output_tensor_count; i++) { auto 
shape = session_->GetOutputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape(); if (shape.size() == 3 && shape[0] == -1 && shape[1] == -1 && shape[2] > 0) { Ort::AllocatorWithDefaultOptions allocator; output_tensor_name = std::string(session_->GetOutputNameAllocated(i, allocator).get()); num_dim = shape[2]; break; } } } TextEmbedder::TextEmbedder(const nlohmann::json& model_config, size_t num_dims, const bool has_custom_dims) { const std::string& model_name = model_config["model_name"].get<std::string>(); LOG(INFO) << "Initializing remote embedding model: " << model_name; auto model_namespace = EmbedderManager::get_model_namespace(model_name); if(model_namespace == "openai") { auto api_key = model_config["api_key"].get<std::string>(); const std::string& url = model_config.contains("url") ? model_config["url"].get<std::string>() : ""; remote_embedder_ = std::make_unique<OpenAIEmbedder>(model_name, api_key, num_dims, has_custom_dims, url); } else if(model_namespace == "google") { auto api_key = model_config["api_key"].get<std::string>(); remote_embedder_ = std::make_unique<GoogleEmbedder>(api_key); } else if(model_namespace == "gcp") { auto project_id = model_config["project_id"].get<std::string>(); auto model_name = model_config["model_name"].get<std::string>(); auto access_token = model_config["access_token"].get<std::string>(); auto refresh_token = model_config["refresh_token"].get<std::string>(); auto client_id = model_config["client_id"].get<std::string>(); auto client_secret = model_config["client_secret"].get<std::string>(); remote_embedder_ = std::make_unique<GCPEmbedder>(project_id, model_name, access_token, refresh_token, client_id, client_secret); } num_dim = num_dims; } std::vector<float> TextEmbedder::mean_pooling(const std::vector<std::vector<float>>& inputs, const std::vector<int64_t>& attention_mask) { std::vector<float> pooled_output; for (int i = 0; i < inputs[0].size(); i++) { float sum = 0; for (int j = 0; j < inputs.size(); j++) { sum += 
inputs[j][i] * attention_mask[j]; } pooled_output.push_back(sum); } // get sum of attention mask float sum_attention_mask = 0; for(auto& val : attention_mask) { sum_attention_mask += val; } // divide by sum of attention mask for(auto& val : pooled_output) { val /= sum_attention_mask; } return pooled_output; } embedding_res_t TextEmbedder::Embed(const std::string& text, const size_t remote_embedder_timeout_ms, const size_t remote_embedding_num_tries) { if(is_remote()) { return remote_embedder_->Embed(text, remote_embedder_timeout_ms, remote_embedding_num_tries); } else { auto encoded_input = tokenizer_->Encode(text); // create input tensor object from data values Ort::AllocatorWithDefaultOptions allocator; Ort::MemoryInfo memory_info = Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtArenaAllocator, OrtMemType::OrtMemTypeDefault); std::vector<Ort::Value> input_tensors; std::vector<std::vector<int64_t>> input_shapes; std::vector<const char*> input_node_names = {"input_ids", "attention_mask"}; // If model is DistilBERT or sentencepiece, it has 2 inputs, else it has 3 inputs if(session_->GetInputCount() == 3 && tokenizer_->get_tokenizer_type() != TokenizerType::clip) { input_node_names.push_back("token_type_ids"); } else if(session_->GetInputCount() == 3 && tokenizer_->get_tokenizer_type() == TokenizerType::clip) { input_node_names.push_back("pixel_values"); } input_shapes.push_back({1, static_cast<int64_t>(encoded_input.input_ids.size())}); input_shapes.push_back({1, static_cast<int64_t>(encoded_input.attention_mask.size())}); if(session_->GetInputCount() == 3 && tokenizer_->get_tokenizer_type() != TokenizerType::clip) { // edge case: xlm_roberta does not have token_type_ids, but if the model has it as input, we need to fill it with 0s if(encoded_input.token_type_ids.size() == 0) { encoded_input.token_type_ids.resize(encoded_input.input_ids.size(), 0); } input_shapes.push_back({1, static_cast<int64_t>(encoded_input.token_type_ids.size())}); } else 
if(session_->GetInputCount() == 3 && tokenizer_->get_tokenizer_type() == TokenizerType::clip) { // dummy input for clip input_shapes.push_back({1, 3, 224, 224}); } input_tensors.push_back(Ort::Value::CreateTensor<int64_t>(memory_info, encoded_input.input_ids.data(), encoded_input.input_ids.size(), input_shapes[0].data(), input_shapes[0].size())); input_tensors.push_back(Ort::Value::CreateTensor<int64_t>(memory_info, encoded_input.attention_mask.data(), encoded_input.attention_mask.size(), input_shapes[1].data(), input_shapes[1].size())); if(session_->GetInputCount() == 3 && tokenizer_->get_tokenizer_type() != TokenizerType::clip) { input_tensors.push_back(Ort::Value::CreateTensor<int64_t>(memory_info, encoded_input.token_type_ids.data(), encoded_input.token_type_ids.size(), input_shapes[2].data(), input_shapes[2].size())); } else if(session_->GetInputCount() == 3 && tokenizer_->get_tokenizer_type() == TokenizerType::clip) { // dummy input for clip std::vector<float> pixel_values(3 * 224 * 224, 0.5); input_tensors.push_back(Ort::Value::CreateTensor<float>(memory_info, pixel_values.data(), pixel_values.size(), input_shapes[2].data(), input_shapes[2].size())); } //LOG(INFO) << "Running model"; // create output tensor object std::vector<const char*> output_node_names = {output_tensor_name.c_str()}; // Cannot run same model in parallel, so lock the mutex std::unique_lock<std::mutex> lock(mutex_); auto output_tensor = session_->Run(Ort::RunOptions{nullptr}, input_node_names.data(), input_tensors.data(), input_tensors.size(), output_node_names.data(), output_node_names.size()); lock.unlock(); std::vector<std::vector<float>> output; float* data = output_tensor[0].GetTensorMutableData<float>(); // print output tensor shape auto shape = output_tensor[0].GetTensorTypeAndShapeInfo().GetShape(); // edge case for clip model if(shape.size() == 2) { // insert 1 to index 0 shape.insert(shape.begin(), 1); } for (int i = 0; i < shape[1]; i++) { std::vector<float> temp; for (int j = 
0; j < shape[2]; j++) { temp.push_back(data[i * shape[2] + j]); } // edge case for clip model if(tokenizer_->get_tokenizer_type() == TokenizerType::clip) { return embedding_res_t(temp); } output.push_back(temp); } auto pooled_output = mean_pooling(output, encoded_input.attention_mask); return embedding_res_t(pooled_output); } } std::vector<embedding_res_t> TextEmbedder::batch_embed(const std::vector<std::string>& inputs, const size_t remote_embedding_batch_size, const size_t remote_embedding_timeout_ms, const size_t remote_embedding_num_tries) { std::vector<embedding_res_t> outputs; if(!is_remote()) { for(int i = 0; i < inputs.size(); i += 8) { auto input_batch = std::vector<std::string>(inputs.begin() + i, inputs.begin() + std::min(i + 8, static_cast<int>(inputs.size()))); auto encoded_inputs = batch_encode(input_batch); // create input tensor object from data values Ort::AllocatorWithDefaultOptions allocator; Ort::MemoryInfo memory_info = Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtArenaAllocator, OrtMemType::OrtMemTypeDefault); std::vector<Ort::Value> input_tensors; std::vector<std::vector<int64_t>> input_shapes; std::vector<const char*> input_node_names = {"input_ids", "attention_mask"}; // If model is DistilBERT or sentencepiece, it has 2 inputs, else it has 3 inputs if(session_->GetInputCount() == 3 && tokenizer_->get_tokenizer_type() != TokenizerType::clip) { input_node_names.push_back("token_type_ids"); } else if(session_->GetInputCount() == 3 && tokenizer_->get_tokenizer_type() == TokenizerType::clip) { input_node_names.push_back("pixel_values"); } input_shapes.push_back({static_cast<int64_t>(encoded_inputs.input_ids.size()), static_cast<int64_t>(encoded_inputs.input_ids[0].size())}); input_shapes.push_back({static_cast<int64_t>(encoded_inputs.attention_mask.size()), static_cast<int64_t>(encoded_inputs.attention_mask[0].size())}); if(session_->GetInputCount() == 3 && tokenizer_->get_tokenizer_type() != TokenizerType::clip) { 
input_shapes.push_back({static_cast<int64_t>(encoded_inputs.token_type_ids.size()), static_cast<int64_t>(encoded_inputs.token_type_ids[0].size())}); } else if(session_->GetInputCount() == 3 && tokenizer_->get_tokenizer_type() == TokenizerType::clip) { // dummy input for clip input_shapes.push_back({1, 3, 224, 224}); } std::vector<int64_t> input_ids_flatten; std::vector<int64_t> attention_mask_flatten; std::vector<int64_t> token_type_ids_flatten; for (int i = 0; i < encoded_inputs.input_ids.size(); i++) { for (int j = 0; j < encoded_inputs.input_ids[i].size(); j++) { input_ids_flatten.push_back(encoded_inputs.input_ids[i][j]); } } for (int i = 0; i < encoded_inputs.attention_mask.size(); i++) { for (int j = 0; j < encoded_inputs.attention_mask[i].size(); j++) { attention_mask_flatten.push_back(encoded_inputs.attention_mask[i][j]); } } if(session_->GetInputCount() == 3) { for (int i = 0; i < encoded_inputs.token_type_ids.size(); i++) { for (int j = 0; j < encoded_inputs.token_type_ids[i].size(); j++) { token_type_ids_flatten.push_back(encoded_inputs.token_type_ids[i][j]); } } } input_tensors.push_back(Ort::Value::CreateTensor<int64_t>(memory_info, input_ids_flatten.data(), input_ids_flatten.size(), input_shapes[0].data(), input_shapes[0].size())); input_tensors.push_back(Ort::Value::CreateTensor<int64_t>(memory_info, attention_mask_flatten.data(), attention_mask_flatten.size(), input_shapes[1].data(), input_shapes[1].size())); if(session_->GetInputCount() == 3 && tokenizer_->get_tokenizer_type() != TokenizerType::clip) { input_tensors.push_back(Ort::Value::CreateTensor<int64_t>(memory_info, token_type_ids_flatten.data(), token_type_ids_flatten.size(), input_shapes[2].data(), input_shapes[2].size())); } else if(session_->GetInputCount() == 3 && tokenizer_->get_tokenizer_type() == TokenizerType::clip) { // dummy input for clip std::vector<float> pixel_values(3 * 224 * 224, 0.5); input_tensors.push_back(Ort::Value::CreateTensor<float>(memory_info, pixel_values.data(), 
pixel_values.size(), input_shapes[2].data(), input_shapes[2].size())); } //LOG(INFO) << "Running model"; // create output tensor object std::vector<const char*> output_node_names = {output_tensor_name.c_str()}; // if seq length is 0, return empty vector if(input_shapes[0][1] == 0) { for(int i = 0; i < input_batch.size(); i++) { outputs.push_back(embedding_res_t(400, nlohmann::json({{"error", "Invalid input: empty sequence"}}))); } continue; } std::unique_lock<std::mutex> lock(mutex_); auto output_tensor = session_->Run(Ort::RunOptions{nullptr}, input_node_names.data(), input_tensors.data(), input_tensors.size(), output_node_names.data(), output_node_names.size()); lock.unlock(); float* data = output_tensor[0].GetTensorMutableData<float>(); // print output tensor shape auto shape = output_tensor[0].GetTensorTypeAndShapeInfo().GetShape(); // edge case for clip model if(shape.size() == 2) { // insert 1 to index 0 shape.insert(shape.begin(), 1); } for (int i = 0; i < shape[0]; i++) { std::vector<std::vector<float>> output; for (int j = 0; j < shape[1]; j++) { std::vector<float> output_row; for (int k = 0; k < shape[2]; k++) { output_row.push_back(data[i * shape[1] * shape[2] + j * shape[2] + k]); } if(tokenizer_->get_tokenizer_type() == TokenizerType::clip) { // no mean pooling for clip outputs.push_back(embedding_res_t(output_row)); continue; } output.push_back(output_row); } if(tokenizer_->get_tokenizer_type() != TokenizerType::clip) { outputs.push_back(embedding_res_t(mean_pooling(output, encoded_inputs.attention_mask[i]))); } } } } else { outputs = std::move(remote_embedder_->batch_embed(inputs, remote_embedding_batch_size, remote_embedding_timeout_ms, remote_embedding_num_tries)); } return outputs; } TextEmbedder::~TextEmbedder() { } batch_encoded_input_t TextEmbedder::batch_encode(const std::vector<std::string>& inputs) { batch_encoded_input_t encoded_inputs; for(auto& input : inputs) { auto encoded_input = tokenizer_->Encode(input); 
encoded_inputs.input_ids.push_back(encoded_input.input_ids); encoded_inputs.attention_mask.push_back(encoded_input.attention_mask); encoded_inputs.token_type_ids.push_back(encoded_input.token_type_ids); } // Pad inputs size_t max_input_len = 0; for(auto& input_ids : encoded_inputs.input_ids) { if(input_ids.size() > max_input_len) { max_input_len = input_ids.size(); } } for(auto& input_ids : encoded_inputs.input_ids) { input_ids.resize(max_input_len, 0); } for(auto& attention_mask : encoded_inputs.attention_mask) { attention_mask.resize(max_input_len, 0); } for(auto& token_type_ids : encoded_inputs.token_type_ids) { token_type_ids.resize(max_input_len, 0); } return encoded_inputs; } Option<bool> TextEmbedder::validate() { if(session_->GetInputCount() != 3 && session_->GetInputCount() != 2) { LOG(ERROR) << "Invalid model: input count is not 3 or 2"; return Option<bool>(400, "Invalid model: input count is not 3 or 2"); } Ort::AllocatorWithDefaultOptions allocator; auto input_ids_name = session_->GetInputNameAllocated(0, allocator); if (std::strcmp(input_ids_name.get(), "input_ids") != 0) { LOG(ERROR) << "Invalid model: input_ids tensor not found"; return Option<bool>(400, "Invalid model: input_ids tensor not found"); } auto attention_mask_index = tokenizer_->get_tokenizer_type() == TokenizerType::clip ? 
2 : 1; auto attention_mask_name = session_->GetInputNameAllocated(attention_mask_index, allocator); if (std::strcmp(attention_mask_name.get(), "attention_mask") != 0) { LOG(ERROR) << "Invalid model: attention_mask tensor not found"; return Option<bool>(400, "Invalid model: attention_mask tensor not found"); } if(session_->GetInputCount() == 3 && tokenizer_->get_tokenizer_type() != TokenizerType::clip) { auto token_type_ids_name = session_->GetInputNameAllocated(2, allocator); if (std::strcmp(token_type_ids_name.get(), "token_type_ids") != 0) { LOG(ERROR) << "Invalid model: token_type_ids tensor not found"; return Option<bool>(400, "Invalid model: token_type_ids tensor not found"); } } auto output_tensor_count = session_->GetOutputCount(); bool found_output_tensor = false; for (size_t i = 0; i < output_tensor_count; i++) { auto shape = session_->GetOutputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape(); // clip output tensor if(shape.size() == 2 && shape[0] == -1 && shape[1] == 512 && tokenizer_->get_tokenizer_type() == TokenizerType::clip) { auto name = session_->GetOutputNameAllocated(i, allocator); if (std::strcmp(name.get(), "text_embeds") == 0) { found_output_tensor = true; break; } } if (shape.size() == 3 && shape[0] == -1 && shape[1] == -1 && shape[2] > 0) { found_output_tensor = true; break; } } if (!found_output_tensor) { LOG(ERROR) << "Invalid model: Output tensor not found"; return Option<bool>(400, "Invalid model: Output tensor not found"); } return Option<bool>(true); } const size_t TextEmbedder::get_num_dim() const { return num_dim; }
20,010
C++
.cpp
349
47.143266
206
0.609614
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,808
embedder_manager.cpp
typesense_typesense/src/embedder_manager.cpp
#include "embedder_manager.h" #include "system_metrics.h" EmbedderManager& EmbedderManager::get_instance() { static EmbedderManager instance; return instance; } Option<bool> EmbedderManager::validate_and_init_model(const nlohmann::json& model_config, size_t& num_dims) { const std::string& model_name = model_config["model_name"].get<std::string>(); if(is_remote_model(model_name)) { LOG(INFO) << "Validating and initializing remote model: " << model_name; return validate_and_init_remote_model(model_config, num_dims); } else { LOG(INFO) << "Validating and initializing local model: " << model_name; auto op = validate_and_init_local_model(model_config, num_dims); if(op.ok()) { LOG(INFO) << "Finished initializing local model: " << model_name; } else { LOG(ERROR) << "Failed to initialize local model " << model_name << ", error: " << op.error(); } return op; } } Option<bool> EmbedderManager::validate_and_init_remote_model(const nlohmann::json& model_config, size_t& num_dims) { const std::string& model_name = model_config["model_name"].get<std::string>(); auto model_namespace = EmbedderManager::get_model_namespace(model_name); bool has_custom_dims = false; if(model_namespace == "openai") { auto num_dims_before = num_dims; auto op = OpenAIEmbedder::is_model_valid(model_config, num_dims); // if the dimensions did not change, it means the model has custom dimensions has_custom_dims = num_dims_before == num_dims; if(!op.ok()) { return op; } } else if(model_namespace == "google") { auto op = GoogleEmbedder::is_model_valid(model_config, num_dims); if(!op.ok()) { return op; } } else if(model_namespace == "gcp") { auto op = GCPEmbedder::is_model_valid(model_config, num_dims); if(!op.ok()) { return op; } } else { return Option<bool>(400, "Invalid model namespace"); } std::unique_lock<std::mutex> lock(text_embedders_mutex); std::string model_key = is_remote_model(model_name) ? 
RemoteEmbedder::get_model_key(model_config) : model_name; auto text_embedder_it = text_embedders.find(model_key); if(text_embedder_it == text_embedders.end()) { text_embedders.emplace(model_key, std::make_shared<TextEmbedder>(model_config, num_dims, has_custom_dims)); } return Option<bool>(true); } Option<bool> EmbedderManager::update_remote_model_apikey(const nlohmann::json &model_config, const std::string& new_apikey) { std::unique_lock<std::mutex> lock(text_embedders_mutex); const auto& model_key = RemoteEmbedder::get_model_key(model_config); if(text_embedders.find(model_key) == text_embedders.end()) { return Option<bool>(404, "Text embedder was not found."); } if(!text_embedders[model_key]->is_remote()) { return Option<bool>(400, "Text embedder is not valid."); } if(!text_embedders[model_key]->update_remote_embedder_apikey(new_apikey)) { return Option<bool>(400, "Failed to update remote model api_key."); } //update text embedder with new api_key and remove old entry auto updated_model_config = model_config; updated_model_config["api_key"] = new_apikey; const auto& updated_model_key = RemoteEmbedder::get_model_key(updated_model_config); text_embedders[updated_model_key] = text_embedders[model_key]; text_embedders.erase(model_key); return Option<bool>(true); } Option<bool> EmbedderManager::validate_and_init_local_model(const nlohmann::json& model_config, size_t& num_dims) { const std::string& model_name = model_config["model_name"].get<std::string>(); Option<bool> public_model_op = EmbedderManager::get_instance().init_public_model(model_name); if(!public_model_op.ok()) { return public_model_op; } std::string abs_path = EmbedderManager::get_absolute_model_path( EmbedderManager::get_model_name_without_namespace(model_name)); if(!std::filesystem::exists(abs_path)) { LOG(ERROR) << "Model file not found: " << abs_path; return Option<bool>(400, "Model file not found"); } bool is_public_model = public_model_op.get(); if(!is_public_model) { 
if(!std::filesystem::exists(EmbedderManager::get_absolute_config_path(model_name))) { LOG(ERROR) << "Config file not found: " << EmbedderManager::get_absolute_config_path(model_name); return Option<bool>(400, "Config file not found"); } std::ifstream config_file(EmbedderManager::get_absolute_config_path(model_name)); nlohmann::json config; config_file >> config; if(config["model_type"].is_null() || config["vocab_file_name"].is_null()) { LOG(ERROR) << "Invalid config file: " << EmbedderManager::get_absolute_config_path(model_name); return Option<bool>(400, "Invalid config file"); } if(!config["model_type"].is_string() || !config["vocab_file_name"].is_string()) { LOG(ERROR) << "Invalid config file: " << EmbedderManager::get_absolute_config_path(model_name); return Option<bool>(400, "Invalid config file"); } if(!std::filesystem::exists(EmbedderManager::get_model_subdir(model_name) + "/" + config["vocab_file_name"].get<std::string>())) { LOG(ERROR) << "Vocab file not found: " << EmbedderManager::get_model_subdir(model_name) + "/" + config["vocab_file_name"].get<std::string>(); return Option<bool>(400, "Vocab file not found"); } if(config["model_type"].get<std::string>() != "bert" && config["model_type"].get<std::string>() != "xlm_roberta" && config["model_type"].get<std::string>() != "distilbert" && config["model_type"].get<std::string>() != "clip") { LOG(ERROR) << "Invalid model type: " << config["model_type"].get<std::string>(); return Option<bool>(400, "Invalid model type"); } } std::unique_lock<std::mutex> lock(text_embedders_mutex); auto text_embedder_it = text_embedders.find(model_name); if(text_embedder_it != text_embedders.end()) { num_dims = text_embedder_it->second->get_num_dim(); return Option<bool>(true); } const auto& model_name_without_namespace = get_model_name_without_namespace(model_name); const auto& free_memory = SystemMetrics::get_memory_free_bytes(); const auto& model_file_size = std::filesystem::file_size(abs_path); // return error if (model file 
size * 1.15) is greater than free memory if(model_file_size * 1.15 > free_memory) { LOG(ERROR) << "Memory required to load the model exceeds free memory available."; return Option<bool>(400, "Memory required to load the model exceeds free memory available."); } const std::shared_ptr<TextEmbedder>& embedder = std::make_shared<TextEmbedder>(model_name_without_namespace); auto validate_op = embedder->validate(); if(!validate_op.ok()) { return validate_op; } num_dims = embedder->get_num_dim(); text_embedders.emplace(model_name, embedder); // if model is clip, generate image embedder if(embedder->get_tokenizer_type() == TokenizerType::clip) { auto image_embedder = std::make_shared<CLIPImageEmbedder>(embedder->get_session(), embedder->get_env(), get_model_subdir(model_name_without_namespace)); image_embedders.emplace(model_name, image_embedder); } return Option<bool>(true); } Option<TextEmbedder*> EmbedderManager::get_text_embedder(const nlohmann::json& model_config) { std::unique_lock<std::mutex> lock(text_embedders_mutex); const std::string& model_name = model_config.at("model_name"); std::string model_key = is_remote_model(model_name) ? 
RemoteEmbedder::get_model_key(model_config) : model_name; auto text_embedder_it = text_embedders.find(model_key); if(text_embedder_it == text_embedders.end()) { return Option<TextEmbedder*>(404, "Text embedder was not found."); } return Option<TextEmbedder*>(text_embedder_it->second.get()); } Option<ImageEmbedder*> EmbedderManager::get_image_embedder(const nlohmann::json& model_config) { std::unique_lock<std::mutex> lock(image_embedders_mutex); const std::string& model_name = model_config.at("model_name"); auto image_embedder_it = image_embedders.find(model_name); if(image_embedder_it == image_embedders.end()) { return Option<ImageEmbedder*>(404, "Image embedder was not found."); } return Option<ImageEmbedder*>(image_embedder_it->second.get()); } void EmbedderManager::delete_text_embedder(const std::string& model_path) { std::unique_lock<std::mutex> lock(text_embedders_mutex); if (text_embedders.find(model_path) != text_embedders.end()) { text_embedders.erase(model_path); } if (public_models.find(model_path) != public_models.end()) { public_models.erase(model_path); } } void EmbedderManager::delete_all_text_embedders() { std::unique_lock<std::mutex> lock(text_embedders_mutex); text_embedders.clear(); } void EmbedderManager::delete_image_embedder(const std::string& model_path) { std::unique_lock<std::mutex> lock(image_embedders_mutex); if (image_embedders.find(model_path) != image_embedders.end()) { image_embedders.erase(model_path); } } void EmbedderManager::delete_all_image_embedders() { std::unique_lock<std::mutex> lock(image_embedders_mutex); image_embedders.clear(); } const TokenizerType EmbedderManager::get_tokenizer_type(const nlohmann::json& model_config) { if(model_config.find("model_type") == model_config.end()) { return TokenizerType::bert; } else { std::string tokenizer_type = model_config.at("model_type").get<std::string>(); if(tokenizer_type == "distilbert") { return TokenizerType::distilbert; } else if(tokenizer_type == "xlm_roberta") { return 
TokenizerType::xlm_roberta; } else if(tokenizer_type == "clip") { return TokenizerType::clip; } else { return TokenizerType::bert; } } } const std::string EmbedderManager::get_indexing_prefix(const nlohmann::json& model_config) { std::string val; if(is_public_model(model_config["model_name"].get<std::string>())) { std::unique_lock<std::mutex> lock(text_embedders_mutex); val = public_models[model_config["model_name"].get<std::string>()].indexing_prefix; } else { val = model_config.count("indexing_prefix") == 0 ? "" : model_config["indexing_prefix"].get<std::string>(); } if(!val.empty()) { val += " "; } return val; } const std::string EmbedderManager::get_query_prefix(const nlohmann::json& model_config) { std::string val; if(is_public_model(model_config["model_name"].get<std::string>())) { std::unique_lock<std::mutex> lock(text_embedders_mutex); val = public_models[model_config["model_name"].get<std::string>()].query_prefix; } else { val = model_config.count("query_prefix") == 0 ? "" : model_config["query_prefix"].get<std::string>(); } if(!val.empty()) { val += " "; } return val; } void EmbedderManager::set_model_dir(const std::string& dir) { // create the directory if it doesn't exist if(!std::filesystem::exists(dir)) { std::filesystem::create_directories(dir); } model_dir = dir; } const std::string& EmbedderManager::get_model_dir() { return model_dir; } EmbedderManager::~EmbedderManager() { } const std::string EmbedderManager::get_absolute_model_path(const std::string& model_name) { return get_model_subdir(model_name) + "/model.onnx"; } const std::string EmbedderManager::get_absolute_vocab_path(const std::string& model_name, const std::string& vocab_file_name) { return get_model_subdir(model_name) + "/" + vocab_file_name; } const std::string EmbedderManager::get_absolute_config_path(const std::string& model_name) { return get_model_subdir(model_name) + "/config.json"; } const bool EmbedderManager::check_md5(const std::string& file_path, const std::string& 
target_md5) { const size_t BUFF_SIZE = 4096 * 4; std::ifstream infile(file_path, std::ifstream::binary); if(infile.fail()) { return false; } EVP_MD_CTX* mdctx = EVP_MD_CTX_new(); // md5 context const EVP_MD* md5Func = EVP_md5(); // use EVP md5 function EVP_DigestInit_ex(mdctx, md5Func, NULL); // Initializes digest type // reads in values from buffer containing file pointer char buff[BUFF_SIZE]; while(infile.good()) { infile.read(buff, sizeof(buff)); EVP_DigestUpdate(mdctx, buff, infile.gcount()); } unsigned int md_len; // hash length unsigned char md5_value[EVP_MAX_MD_SIZE]; // actual hash value EVP_DigestFinal_ex(mdctx, md5_value, &md_len); EVP_MD_CTX_free(mdctx); std::stringstream res; // convert md5 to hex string with leading zeros for (size_t i = 0; i < md_len; i++) { res << std::hex << std::setfill('0') << std::setw(2) << (int)md5_value[i]; } return res.str() == target_md5; } Option<bool> EmbedderManager::download_public_model(const text_embedding_model& model) { HttpClient& httpClient = HttpClient::get_instance(); auto actual_model_name = get_model_name_without_namespace(model.model_name); if(!check_md5(get_absolute_model_path(actual_model_name), model.model_md5)) { long res = httpClient.download_file(get_model_url(model), get_absolute_model_path(actual_model_name)); if(res != 200) { LOG(INFO) << "Failed to download public model: " << model.model_name; return Option<bool>(400, "Failed to download model file"); } } if(!model.data_file_md5.empty()) { if(!check_md5(get_absolute_model_path(actual_model_name) + "_data", model.data_file_md5)) { long res = httpClient.download_file(get_model_data_url(model), get_absolute_model_path(actual_model_name) + "_data"); if(res != 200) { LOG(INFO) << "Failed to download public model data file: " << model.model_name; return Option<bool>(400, "Failed to download model data file"); } } } if(!model.vocab_md5.empty() && !check_md5(get_absolute_vocab_path(actual_model_name, model.vocab_file_name), model.vocab_md5)) { long res = 
httpClient.download_file(get_vocab_url(model), get_absolute_vocab_path(actual_model_name, model.vocab_file_name)); if(res != 200) { LOG(INFO) << "Failed to download default vocab for model: " << model.model_name; return Option<bool>(400, "Failed to download vocab file"); } } if(!model.tokenizer_md5.empty()) { auto tokenizer_file_path = get_model_subdir(actual_model_name) + "/" + model.tokenizer_file_name; if(!check_md5(tokenizer_file_path, model.tokenizer_md5)) { long res = httpClient.download_file(MODELS_REPO_URL + actual_model_name + "/" + model.tokenizer_file_name, tokenizer_file_path); if(res != 200) { LOG(INFO) << "Failed to download tokenizer file for model: " << model.model_name; return Option<bool>(400, "Failed to download tokenizer file"); } } } if(!model.image_processor_md5.empty()) { auto image_processor_file_path = get_model_subdir(actual_model_name) + "/" + model.image_processor_file_name; if(!check_md5(image_processor_file_path, model.image_processor_md5)) { long res = httpClient.download_file(MODELS_REPO_URL + actual_model_name + "/" + model.image_processor_file_name, image_processor_file_path); if(res != 200) { LOG(INFO) << "Failed to download image processor file for model: " << model.model_name; return Option<bool>(400, "Failed to download image processor file"); } } } return Option<bool>(true); } Option<bool> EmbedderManager::init_public_model(const std::string& model_name) { std::unique_lock<std::mutex> lock(text_embedders_mutex); if(public_models.find(model_name) != public_models.end()) { // model has already been initialized return Option<bool>(true); } auto model_namespace = get_namespace(model_name); if(!model_namespace.ok() || model_namespace.get() != "ts") { // not a public model return Option<bool>(false); } auto actual_model_name = get_model_name_without_namespace(model_name); auto model_config_op = get_public_model_config(actual_model_name); if(!model_config_op.ok()) { return Option<bool>(model_config_op.code(), 
model_config_op.error()); } auto config = model_config_op.get(); config["model_name"] = actual_model_name; auto model = text_embedding_model(config); auto download_op = EmbedderManager::get_instance().download_public_model(model); if (!download_op.ok()) { LOG(ERROR) << download_op.error(); return Option<bool>(400, download_op.error()); } public_models.emplace(model_name, text_embedding_model(config)); return Option<bool>(true);; } bool EmbedderManager::is_public_model(const std::string& model_name) { std::unique_lock<std::mutex> lock(text_embedders_mutex); return public_models.find(model_name) != public_models.end(); } const std::string EmbedderManager::get_model_subdir(const std::string& model_name) { if(model_dir.back() != '/') { // create subdir <model_name> if it doesn't exist if(!std::filesystem::exists(model_dir + "/" + model_name)) { std::filesystem::create_directories(model_dir + "/" + model_name); } return model_dir + "/" + model_name; } else { // create subdir <model_name> if it doesn't exist if(!std::filesystem::exists(model_dir + model_name)) { std::filesystem::create_directories(model_dir + model_name); } return model_dir + model_name; } } Option<std::string> EmbedderManager::get_namespace(const std::string& model_name) { // <namespace>/<model_name> if / is present in model_name if(model_name.find("/") != std::string::npos) { return Option<std::string>(model_name.substr(0, model_name.find("/"))); } else { return Option<std::string>(404, "Namespace not found"); } } const std::string EmbedderManager::get_model_name_without_namespace(const std::string& model_name) { // <namespace>/<model_name> if / is present in model_name if(model_name.find("/") != std::string::npos) { return model_name.substr(model_name.find("/") + 1); } else { return model_name; } } text_embedding_model::text_embedding_model(const nlohmann::json& json) { if(json.count("model_name") != 0) { model_name = json["model_name"].get<std::string>(); } if(json.count("model_md5") != 0) { 
model_md5 = json["model_md5"].get<std::string>(); } if(json.count("vocab_file_name") != 0) { vocab_file_name = json["vocab_file_name"].get<std::string>(); } if(json.count("vocab_md5") != 0) { vocab_md5 = json["vocab_md5"].get<std::string>(); } tokenizer_type = EmbedderManager::get_tokenizer_type(json); if(json.count("indexing_prefix") != 0) { indexing_prefix = json.at("indexing_prefix").get<std::string>(); } if(json.count("query_prefix") != 0) { query_prefix = json.at("query_prefix").get<std::string>(); } if(json.count("data_md5") != 0) { data_file_md5 = json.at("data_md5").get<std::string>(); } if(json.count("tokenizer_md5") != 0) { tokenizer_md5 = json.at("tokenizer_md5").get<std::string>(); } if(json.count("tokenizer_file_name") != 0) { tokenizer_file_name = json.at("tokenizer_file_name").get<std::string>(); } if(json.count("image_processor_md5") != 0) { image_processor_md5 = json.at("image_processor_md5").get<std::string>(); } if(json.count("image_processor_file_name") != 0) { image_processor_file_name = json.at("image_processor_file_name").get<std::string>(); } if(json.count("has_image_embedder") != 0) { has_image_embedder = json.at("has_image_embedder").get<bool>(); } } Option<nlohmann::json> EmbedderManager::get_public_model_config(const std::string& model_name) { auto actual_model_name = get_model_name_without_namespace(model_name); HttpClient& httpClient = HttpClient::get_instance(); std::unordered_map<std::string, std::string> headers; headers["Accept"] = "application/json"; std::map<std::string, std::string> response_headers; std::string response_body; long res = httpClient.get_response(MODELS_REPO_URL + actual_model_name + "/" + MODEL_CONFIG_FILE, response_body, response_headers, headers); if(res == 200 || res == 302) { // cache the config file auto config_file_path = get_absolute_config_path(actual_model_name); std::ofstream config_file(config_file_path); config_file << response_body; config_file.close(); return 
Option<nlohmann::json>(nlohmann::json::parse(response_body)); } // check cache if network fails if(std::filesystem::exists(get_absolute_config_path(model_name))) { std::ifstream config_file(get_absolute_config_path(model_name)); nlohmann::json config; config_file >> config; config_file.close(); return Option<nlohmann::json>(config); } if(res >= 500) { return Option<nlohmann::json>(res, "Model repository is down. Status code: " + std::to_string(res)); } return Option<nlohmann::json>(404, "Model not found"); } const std::string EmbedderManager::get_model_url(const text_embedding_model& model) { return MODELS_REPO_URL + model.model_name + "/model.onnx"; } const std::string EmbedderManager::get_model_data_url(const text_embedding_model& model) { return MODELS_REPO_URL + model.model_name + "/model.onnx_data"; } const std::string EmbedderManager::get_vocab_url(const text_embedding_model& model) { return MODELS_REPO_URL + model.model_name + "/" + model.vocab_file_name; } const std::string EmbedderManager::get_model_namespace(const std::string& model_name) { if(model_name.find("/") != std::string::npos) { return model_name.substr(0, model_name.find("/")); } else { return "ts"; } } bool EmbedderManager::is_remote_model(const std::string& model_name) { auto model_namespace = get_namespace(model_name); return model_namespace.ok() && (model_namespace.get() == "openai" || model_namespace.get() == "google" || model_namespace.get() == "gcp"); }
23,054
C++
.cpp
471
42.352442
235
0.649767
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,809
http_proxy.cpp
typesense_typesense/src/http_proxy.cpp
#include "http_proxy.h" #include "logger.h" #include <chrono> using namespace std::chrono_literals; HttpProxy::HttpProxy() : cache(30s){ } http_proxy_res_t HttpProxy::call(const std::string& url, const std::string& method, const std::string& req_body, const std::unordered_map<std::string, std::string>& req_headers, const size_t timeout_ms) { HttpClient& client = HttpClient::get_instance(); http_proxy_res_t res; if(method == "GET") { res.status_code = client.get_response(url, res.body, res.headers, req_headers, timeout_ms); } else if(method == "POST") { res.status_code = client.post_response(url, req_body, res.body, res.headers, req_headers, timeout_ms); } else if(method == "PUT") { res.status_code = client.put_response(url, req_body, res.body, res.headers, timeout_ms); } else if(method == "DELETE") { res.status_code = client.delete_response(url, res.body, res.headers, timeout_ms); } else if(method == "POST_STREAM") { async_stream_response_t stream_res; res.status_code = client.post_response_stream(url, req_body, stream_res, res.headers, req_headers, timeout_ms); std::unique_lock lock(stream_res.mutex); stream_res.cv.wait(lock, [&](){ return stream_res.ready; }); nlohmann::json j; j["response"] = stream_res.response_chunks; res.body = j.dump(); } else { res.status_code = 400; nlohmann::json j; j["message"] = "Parameter `method` must be one of GET, POST, POST_STREAM, PUT, DELETE."; res.body = j.dump(); } return res; } http_proxy_res_t HttpProxy::send(const std::string& url, const std::string& method, const std::string& req_body, std::unordered_map<std::string, std::string>& req_headers) { // check if url is in cache uint64_t key = StringUtils::hash_wy(url.c_str(), url.size()); key = StringUtils::hash_combine(key, StringUtils::hash_wy(method.c_str(), method.size())); key = StringUtils::hash_combine(key, StringUtils::hash_wy(req_body.c_str(), req_body.size())); size_t timeout_ms = default_timeout_ms; size_t num_try = default_num_try; if(req_headers.find("timeout_ms") 
!= req_headers.end()){ timeout_ms = std::stoul(req_headers.at("timeout_ms")); req_headers.erase("timeout_ms"); } if(req_headers.find("num_try") != req_headers.end()){ num_try = std::stoul(req_headers.at("num_try")); req_headers.erase("num_try"); } for(auto& header : req_headers){ key = StringUtils::hash_combine(key, StringUtils::hash_wy(header.first.c_str(), header.first.size())); key = StringUtils::hash_combine(key, StringUtils::hash_wy(header.second.c_str(), header.second.size())); } std::shared_lock slock(mutex); if(cache.contains(key)){ return cache[key]; } slock.unlock(); http_proxy_res_t res; for(size_t i = 0; i < num_try; i++){ res = call(url, method, req_body, req_headers, timeout_ms); if(res.status_code != 408 && res.status_code < 500){ break; } LOG(ERROR) << "Proxy call failed, status_code: " << res.status_code << ", timeout_ms: " << timeout_ms << ", try: " << i+1 << ", num_try: " << num_try; } if(res.status_code == 408){ nlohmann::json j; j["message"] = "Server error on remote server. Please try again later."; res.body = j.dump(); } // add to cache if(res.status_code == 200){ std::unique_lock ulock(mutex); cache.insert(key, res); } return res; }
3,764
C++
.cpp
84
36.797619
119
0.596779
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,810
image_embedder.cpp
typesense_typesense/src/image_embedder.cpp
#include "image_embedder.h" #include "text_embedder_remote.h" CLIPImageEmbedder::CLIPImageEmbedder(const std::shared_ptr<Ort::Session>& session, const std::shared_ptr<Ort::Env>& env, const std::string& model_path) : image_processor_(model_path), session_(session), env_(env) { } embedding_res_t CLIPImageEmbedder::embed(const std::string& encoded_image) { std::unique_lock<std::mutex> lock(mutex_); // process image auto processed_image_op = image_processor_.process_image(encoded_image); lock.unlock(); if (!processed_image_op.ok()) { nlohmann::json error_json; error_json["error"] = processed_image_op.error(); return embedding_res_t(processed_image_op.code(), error_json); } auto processed_image = processed_image_op.get(); // create input tensor std::vector<int64_t> input_shape = {1, 3, 224, 224}; std::vector<const char*> input_names = {"pixel_values"}; Ort::MemoryInfo memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault); Ort::Value input_tensor = Ort::Value::CreateTensor<float>(memory_info, (float*) processed_image.data(), processed_image.size(), input_shape.data(), input_shape.size()); // create output tensor std::vector<const char*> output_names = {"image_embeds"}; // run inference // LOG(INFO) << "Running image embedder"; lock.lock(); auto output_tensors = session_->Run(Ort::RunOptions{nullptr}, input_names.data(), &input_tensor, 1, output_names.data(), output_names.size()); lock.unlock(); // get output tensor auto output_tensor = output_tensors.front().GetTensorMutableData<float>(); auto shape = output_tensors.front().GetTensorTypeAndShapeInfo().GetShape(); if (shape.size() != 2) { return embedding_res_t(400, "Invalid shape of output tensor"); } std::vector<float> output_vector; for (int i = 0; i < shape[1]; i++) { output_vector.push_back(output_tensor[i]); } return embedding_res_t(std::move(output_vector)); } std::vector<embedding_res_t> CLIPImageEmbedder::batch_embed(const std::vector<std::string>& inputs) { std::vector<processed_image_t> 
processed_images; std::unordered_map<int, embedding_res_t> results; int i = 0; for (const auto& input : inputs) { std::unique_lock<std::mutex> lock(mutex_); auto processed_image_op = image_processor_.process_image(input); lock.unlock(); if (!processed_image_op.ok()) { nlohmann::json error_json; error_json["error"] = processed_image_op.error(); results[i] = embedding_res_t(processed_image_op.code(), error_json); i++; continue; } processed_images.push_back(processed_image_op.get()); i++; } // no valid images if (processed_images.empty()) { std::vector<embedding_res_t> result_vector(inputs.size()); for (int i = 0; i < inputs.size(); i++) { result_vector[i] = results[i]; } return result_vector; } // create input tensor std::vector<int64_t> input_shape = {static_cast<int64_t>(processed_images.size()), 3, 224, 224}; std::vector<const char*> input_names = {"input_ids", "pixel_values", "attention_mask"}; std::vector<int64_t> dummy_input_ids_shape = {1,1}; std::vector<int64_t> dummy_input_ids = {0}; std::vector<int64_t> dummy_attention_mask_shape = {1,1}; std::vector<int64_t> dummy_attention_mask = {1}; Ort::MemoryInfo memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault); // convert 2D vector to 1D vector std::vector<float> input_vector; for (auto& image : processed_images) { input_vector.reserve(input_vector.size() + image.size()); std::move(image.begin(), image.end(), std::back_inserter(input_vector)); image.clear(); } std::vector<Ort::Value> input_tensors; input_tensors.push_back(Ort::Value::CreateTensor<int64_t>(memory_info, (int64_t*) dummy_input_ids.data(), dummy_input_ids.size(), dummy_input_ids_shape.data(), dummy_input_ids_shape.size())); input_tensors.push_back(Ort::Value::CreateTensor<float>(memory_info, (float*) input_vector.data(), input_vector.size(), input_shape.data(), input_shape.size())); input_tensors.push_back(Ort::Value::CreateTensor<int64_t>(memory_info, (int64_t*) dummy_attention_mask.data(), dummy_attention_mask.size(), 
dummy_attention_mask_shape.data(), dummy_attention_mask_shape.size())); std::vector<const char*> output_names = {"image_embeds"}; // run inference // LOG(INFO) << "Running image embedder"; std::unique_lock<std::mutex> lock(mutex_); auto output_tensors = session_->Run(Ort::RunOptions{nullptr}, input_names.data(), input_tensors.data(), input_tensors.size(), output_names.data(), output_names.size()); lock.unlock(); // get output tensor auto output_tensor = output_tensors.front().GetTensorMutableData<float>(); auto shape = output_tensors.front().GetTensorTypeAndShapeInfo().GetShape(); if (shape.size() != 2) { return std::vector<embedding_res_t>(inputs.size(), embedding_res_t(400, "Invalid shape of output tensor")); } std::vector<embedding_res_t> output(inputs.size()); i = 0; for (int j = 0; j < shape[0]; j++) { while (results.find(i) != results.end()) { output[i] = results[i]; i++; } std::vector<float> output_vector; for (int k = 0; k < shape[1]; k++) { output_vector.push_back(output_tensor[j * shape[1] + k]); } output[i] = embedding_res_t(std::move(output_vector)); i++; } return output; }
5,725
C++
.cpp
112
44.517857
215
0.651021
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,811
aq_model_manager.cpp
typesense_typesense/src/aq_model_manager.cpp
#include <filesystem> #include <map> #include "vq_model_manager.h" #include "http_client.h" const std::string VQModelManager::get_model_url(const std::string& model_name) { return MODELS_REPO_URL + "voice_query/" + model_name + ".bin"; } const std::string VQModelManager::get_config_url() { return MODELS_REPO_URL + "voice_query/models.json"; } const std::string VQModelManager::get_model_namespace(const std::string& model_name) { // <namespace>/<model_name> if / is present in model_name if(model_name.find("/") != std::string::npos) { return model_name.substr(0, model_name.find("/")); } else { return "unknown"; } } const Option<nlohmann::json> VQModelManager::get_config() { auto config_url = get_config_url(); auto& client = HttpClient::get_instance(); std::string res; std::map<std::string, std::string> headers; auto response = client.get_response(config_url, res, headers); if (response != 200) { return Option<nlohmann::json>(400, "Failed to get model config file"); } try { auto config = nlohmann::json::parse(res); return Option<nlohmann::json>(config); } catch (const std::exception& e) { return Option<nlohmann::json>(400, "Failed to parse model config file"); } } const std::string VQModelManager::get_absolute_model_path(const std::string& model_name) { auto voice_query_home = EmbedderManager::get_model_dir(); voice_query_home += voice_query_home.back() == '/' ? "" : "/"; voice_query_home += "voice_query"; std::filesystem::path path(voice_query_home); if (!std::filesystem::exists(path)) { std::filesystem::create_directory(path); } auto model_namespace = get_model_namespace(model_name); std::filesystem::path model_path(voice_query_home.back() == '/' ? voice_query_home + model_namespace : voice_query_home + "/" + model_namespace); if (!std::filesystem::exists(model_path)) { std::filesystem::create_directory(model_path); } return voice_query_home.back() == '/' ? 
voice_query_home + model_name + ".bin" : voice_query_home + "/" + model_name + ".bin"; } Option<bool> VQModelManager::download_model(const std::string& model_name) { auto model_path = get_absolute_model_path(model_name); auto config = get_config(); if (!config.ok()) { return Option<bool>(config.code(), config.error()); } auto config_json = config.get(); if (config_json.find(model_name + ".bin") == config_json.end()) { return Option<bool>(400, "Voice query model not found"); } auto model_md5 = config_json[model_name + ".bin"].get<std::string>(); if(EmbedderManager::check_md5(model_path, model_md5)) { return Option<bool>(true); } std::unique_lock<std::mutex> lock(download_mutex); auto model_url = get_model_url(model_name); auto& client = HttpClient::get_instance(); auto response = client.download_file(model_url, model_path); LOG(INFO) << "Downloading model " << model_name << " from " << model_url << " to " << model_path; if (response != 200) { LOG(INFO) << response; return Option<bool>(400, "Failed to download voice query model"); } return Option<bool>(true); } Option<std::shared_ptr<VQModel>> VQModelManager::validate_and_init_model(const std::string& model_name) { if(models.find(model_name) != models.end()) { return Option<std::shared_ptr<VQModel>>(models[model_name]); } auto model_namespace = get_model_namespace(model_name); if(model_namespace != "ts") { return Option<std::shared_ptr<VQModel>>(400, "Unknown model namespace"); } auto model_name_without_namespace = get_model_name_without_namespace(model_name); auto download_res = download_model(model_name_without_namespace); if (!download_res.ok()) { return Option<std::shared_ptr<VQModel>>(download_res.code(), download_res.error()); } auto model_path = get_absolute_model_path(model_name_without_namespace); auto model_inner_namespace = get_model_namespace(model_name_without_namespace); if (model_inner_namespace == "whisper") { auto whisper_ctx = WhisperModel::validate_and_load_model(model_path); if (!whisper_ctx) { 
return Option<std::shared_ptr<VQModel>>(400, "Failed to load voice query model"); } auto whisper_model = std::shared_ptr<VQModel>(new WhisperModel(whisper_ctx, model_name)); { std::unique_lock<std::shared_mutex> lock(models_mutex); models[model_name] = whisper_model; } return Option<std::shared_ptr<VQModel>>(whisper_model); } else { return Option<std::shared_ptr<VQModel>>(400, "Unknown model namespace"); } } Option<std::shared_ptr<VQModel>> VQModelManager::get_model(const std::string& model_name) { std::shared_lock<std::shared_mutex> lock(models_mutex); auto model = models.find(model_name); if (model == models.end()) { return Option<std::shared_ptr<VQModel>>(400, "Voice query model not found"); } return Option<std::shared_ptr<VQModel>>(model->second); } void VQModelManager::delete_model(const std::string& model_name) { std::unique_lock<std::shared_mutex> lock(models_mutex); auto model = models.find(model_name); if (model != models.end()) { models.erase(model); } } void VQModelManager::delete_all_models() { std::unique_lock<std::shared_mutex> lock(models_mutex); models.clear(); } void VQModelManager::clear_unused_models() { std::unique_lock<std::shared_mutex> lock(models_mutex); for (auto it = models.begin(); it != models.end();) { if (it->second->get_collection_ref_count() == 0) { it = models.erase(it); } else { it++; } } } const std::string VQModelManager::get_model_name_without_namespace(const std::string& model_name) { if(model_name.find("/") != std::string::npos) { return model_name.substr(model_name.find("/") + 1); } else { return model_name; } } VQModelManager::~VQModelManager() { delete_all_models(); }
6,179
C++
.cpp
143
37.615385
149
0.65366
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,812
raft_server.cpp
typesense_typesense/src/raft_server.cpp
#include "store.h" #include "raft_server.h" #include <butil/files/file_enumerator.h> #include <thread> #include <algorithm> #include <string_utils.h> #include <file_utils.h> #include <collection_manager.h> #include <http_client.h> #include <conversation_model_manager.h> #include "rocksdb/utilities/checkpoint.h" #include "thread_local_vars.h" #include "core_api.h" #include "personalization_model_manager.h" namespace braft { DECLARE_int32(raft_do_snapshot_min_index_gap); DECLARE_int32(raft_max_parallel_append_entries_rpc_num); DECLARE_bool(raft_enable_append_entries_cache); DECLARE_int32(raft_max_append_entries_cache_size); DECLARE_int32(raft_max_byte_count_per_rpc); DECLARE_int32(raft_rpc_channel_connect_timeout_ms); } void ReplicationClosure::Run() { // nothing much to do here since responding to client is handled upstream // Auto delete `this` after Run() std::unique_ptr<ReplicationClosure> self_guard(this); } // State machine implementation int ReplicationState::start(const butil::EndPoint & peering_endpoint, const int api_port, int election_timeout_ms, int snapshot_max_byte_count_per_rpc, const std::string & raft_dir, const std::string & nodes, const std::atomic<bool>& quit_abruptly) { this->election_timeout_interval_ms = election_timeout_ms; this->raft_dir_path = raft_dir; this->peering_endpoint = peering_endpoint; braft::NodeOptions node_options; size_t max_tries = 3; while(true) { std::string actual_nodes_config = to_nodes_config(peering_endpoint, api_port, nodes); if(node_options.initial_conf.parse_from(actual_nodes_config) != 0) { if(--max_tries == 0) { LOG(ERROR) << "Giving up parsing nodes configuration: `" << nodes << "`"; return -1; } LOG(ERROR) << "Failed to parse nodes configuration: `" << nodes << "` -- " << " will retry shortly..."; size_t i = 0; while(i++ < 30) { std::this_thread::sleep_for(std::chrono::seconds(1)); if(quit_abruptly) { // enables quitting of server during retries return -1; } } continue; } LOG(INFO) << "Nodes configuration: " << 
actual_nodes_config; break; } this->read_caught_up = false; this->write_caught_up = false; // do snapshot only when the gap between applied index and last snapshot index is >= this number braft::FLAGS_raft_do_snapshot_min_index_gap = 1; // flags for controlling parallelism of append entries braft::FLAGS_raft_max_parallel_append_entries_rpc_num = 1; braft::FLAGS_raft_enable_append_entries_cache = false; braft::FLAGS_raft_max_append_entries_cache_size = 8; // flag controls snapshot download size of each RPC braft::FLAGS_raft_max_byte_count_per_rpc = snapshot_max_byte_count_per_rpc; braft::FLAGS_raft_rpc_channel_connect_timeout_ms = 2000; // automatic snapshot is disabled since it caused issues during slow follower catch-ups node_options.snapshot_interval_s = -1; node_options.catchup_margin = config->get_healthy_read_lag(); node_options.election_timeout_ms = election_timeout_ms; node_options.fsm = this; node_options.node_owns_fsm = false; node_options.filter_before_copy_remote = true; std::string prefix = "local://" + raft_dir; node_options.log_uri = prefix + "/" + log_dir_name; node_options.raft_meta_uri = prefix + "/" + meta_dir_name; node_options.snapshot_uri = prefix + "/" + snapshot_dir_name; node_options.disable_cli = true; // api_port is used as the node identifier braft::Node* node = new braft::Node("default_group", braft::PeerId(peering_endpoint, api_port)); std::string snapshot_dir = raft_dir + "/" + snapshot_dir_name; bool snapshot_exists = dir_enum_count(snapshot_dir) > 0; if(snapshot_exists) { // we will be assured of on_snapshot_load() firing and we will wait for that to init_db() } else { LOG(INFO) << "Snapshot does not exist. 
We will remove db dir and init db fresh."; int reload_store = store->reload(true, ""); if(reload_store != 0) { return reload_store; } int init_db_status = init_db(); if(init_db_status != 0) { LOG(ERROR) << "Failed to initialize DB."; return init_db_status; } } if (node->init(node_options) != 0) { LOG(ERROR) << "Fail to init peering node"; delete node; return -1; } braft::NodeStatus node_status; node->get_status(&node_status); LOG(INFO) << "Node last_index: " << node_status.last_index; std::unique_lock lock(node_mutex); this->node = node; return 0; } std::string ReplicationState::to_nodes_config(const butil::EndPoint& peering_endpoint, const int api_port, const std::string& nodes_config) { if(nodes_config.empty()) { std::string ip_str = butil::ip2str(peering_endpoint.ip).c_str(); return ip_str + ":" + std::to_string(peering_endpoint.port) + ":" + std::to_string(api_port); } else { return resolve_node_hosts(nodes_config); } } string ReplicationState::resolve_node_hosts(const string& nodes_config) { std::vector<std::string> final_nodes_vec; std::vector<std::string> node_strings; StringUtils::split(nodes_config, node_strings, ","); for(const auto& node_str: node_strings) { // could be an IP or a hostname that must be resolved std::vector<std::string> node_parts; StringUtils::split(node_str, node_parts, ":"); if(node_parts.size() != 3) { final_nodes_vec.push_back(node_str); continue; } if(node_parts[0].size() > 64) { LOG(ERROR) << "Host name is too long (must be < 64 characters): " << node_parts[0]; final_nodes_vec.emplace_back(""); continue; } butil::ip_t ip; int status = butil::hostname2ip(node_parts[0].c_str(), &ip); if(status == 0) { final_nodes_vec.push_back( std::string(butil::ip2str(ip).c_str()) + ":" + node_parts[1] + ":" + node_parts[2] ); } else { LOG(ERROR) << "Unable to resolve host: " << node_parts[0]; final_nodes_vec.push_back(node_str); } } std::string final_nodes_config = StringUtils::join(final_nodes_vec, ","); return final_nodes_config; } Option<bool> 
ReplicationState::handle_gzip(const std::shared_ptr<http_req>& request) { if (!request->zstream_initialized) { request->zs.zalloc = Z_NULL; request->zs.zfree = Z_NULL; request->zs.opaque = Z_NULL; request->zs.avail_in = 0; request->zs.next_in = Z_NULL; if (inflateInit2(&request->zs, 16 + MAX_WBITS) != Z_OK) { return Option<bool>(400, "inflateInit failed while decompressing"); } request->zstream_initialized = true; } std::string outbuffer; outbuffer.resize(10 * request->body.size()); request->zs.next_in = (Bytef *) request->body.c_str(); request->zs.avail_in = request->body.size(); std::size_t size_uncompressed = 0; int ret = 0; do { request->zs.avail_out = static_cast<unsigned int>(outbuffer.size()); request->zs.next_out = reinterpret_cast<Bytef *>(&outbuffer[0] + size_uncompressed); ret = inflate(&request->zs, Z_FINISH); if (ret != Z_STREAM_END && ret != Z_OK && ret != Z_BUF_ERROR) { std::string error_msg = request->zs.msg; inflateEnd(&request->zs); return Option<bool>(400, error_msg); } size_uncompressed += (outbuffer.size() - request->zs.avail_out); } while (request->zs.avail_out == 0); if (ret == Z_STREAM_END) { request->zstream_initialized = false; inflateEnd(&request->zs); } outbuffer.resize(size_uncompressed); request->body = outbuffer; request->chunk_len = outbuffer.size(); return Option<bool>(true); } void ReplicationState::write(const std::shared_ptr<http_req>& request, const std::shared_ptr<http_res>& response) { if(shutting_down) { //LOG(INFO) << "write(), force shutdown"; response->set_503("Shutting down."); response->final = true; response->is_alive = false; request->notify(); return ; } // reject write if disk space is running out auto resource_check = cached_resource_stat_t::get_instance().has_enough_resources(raft_dir_path, config->get_disk_used_max_percentage(), config->get_memory_used_max_percentage()); if (resource_check != cached_resource_stat_t::OK && request->do_resource_check()) { response->set_422("Rejecting write: running out of resource 
type: " + std::string(magic_enum::enum_name(resource_check))); response->final = true; auto req_res = new async_req_res_t(request, response, true); return message_dispatcher->send_message(HttpServer::STREAM_RESPONSE_MESSAGE, req_res); } if(config->get_skip_writes() && request->path_without_query != "/config") { response->set_422("Skipping writes."); response->final = true; auto req_res = new async_req_res_t(request, response, true); return message_dispatcher->send_message(HttpServer::STREAM_RESPONSE_MESSAGE, req_res); } route_path* rpath = nullptr; bool route_found = server->get_route(request->route_hash, &rpath); if(route_found && rpath->handler == patch_update_collection) { if(get_alter_in_progress(request->params["collection"])) { // This is checked only during live writes from a http request: we do this because we want to only // throttle concurrent successive alter requests, but want to allow an alter that happens after another // finishes via raft log replay. response->set_422("Another collection update operation is in progress."); response->final = true; auto req_res = new async_req_res_t(request, response, true); return message_dispatcher->send_message(HttpServer::STREAM_RESPONSE_MESSAGE, req_res); } } std::shared_lock lock(node_mutex); if(!node) { return ; } if (!node->is_leader()) { return write_to_leader(request, response); } //check if it's first gzip chunk or is gzip stream initialized if(((request->body.size() > 2) && (31 == (int)request->body[0] && -117 == (int)request->body[1])) || request->zstream_initialized) { auto res = handle_gzip(request); if(!res.ok()) { response->set_422(res.error()); response->final = true; auto req_res = new async_req_res_t(request, response, true); return message_dispatcher->send_message(HttpServer::STREAM_RESPONSE_MESSAGE, req_res); } } // Serialize request to replicated WAL so that all the nodes in the group receive it as well. // NOTE: actual write must be done only on the `on_apply` method to maintain consistency. 
butil::IOBufBuilder bufBuilder; bufBuilder << request->to_json(); //LOG(INFO) << "write() pre request ref count " << request.use_count(); // Apply this log as a braft::Task braft::Task task; task.data = &bufBuilder.buf(); // This callback would be invoked when the task actually executes or fails task.done = new ReplicationClosure(request, response); //LOG(INFO) << "write() post request ref count " << request.use_count(); // To avoid ABA problem task.expected_term = leader_term.load(butil::memory_order_relaxed); //LOG(INFO) << ":::" << "body size before apply: " << request->body.size(); // Now the task is applied to the group node->apply(task); pending_writes++; } void ReplicationState::write_to_leader(const std::shared_ptr<http_req>& request, const std::shared_ptr<http_res>& response) { // no lock on `node` needed as caller uses the lock if(!node || node->leader_id().is_empty()) { // Handle no leader scenario LOG(ERROR) << "Rejecting write: could not find a leader."; if(response->proxied_stream) { // streaming in progress: ensure graceful termination (cannot start response again) LOG(ERROR) << "Terminating streaming request gracefully."; response->is_alive = false; request->notify(); return ; } response->set_500("Could not find a leader."); auto req_res = new async_req_res_t(request, response, true); return message_dispatcher->send_message(HttpServer::STREAM_RESPONSE_MESSAGE, req_res); } if (response->proxied_stream) { // indicates async request body of in-flight request //LOG(INFO) << "Inflight proxied request, returning control to caller, body_size=" << request->body.size(); request->notify(); return ; } const std::string & leader_addr = node->leader_id().to_string(); //LOG(INFO) << "Redirecting write to leader at: " << leader_addr; h2o_custom_generator_t* custom_generator = reinterpret_cast<h2o_custom_generator_t *>(response->generator.load()); HttpServer* server = custom_generator->h2o_handler->http_server; auto raw_req = request->_req; const std::string& path 
= std::string(raw_req->path.base, raw_req->path.len); const std::string& scheme = std::string(raw_req->scheme->name.base, raw_req->scheme->name.len); const std::string url = get_node_url_path(leader_addr, path, scheme); thread_pool->enqueue([request, response, server, path, url, this]() { pending_writes++; std::map<std::string, std::string> res_headers; if(request->http_method == "POST") { std::vector<std::string> path_parts; StringUtils::split(path, path_parts, "/"); if(path_parts.back().rfind("import", 0) == 0) { // imports are handled asynchronously response->proxied_stream = true; long status = HttpClient::post_response_async(url, request, response, server, true); if(status == 500) { response->content_type_header = res_headers["content-type"]; response->set_500(""); } else { return ; } } else { std::string api_res; long status = HttpClient::post_response(url, request->body, api_res, res_headers, {}, 0, true); response->content_type_header = res_headers["content-type"]; response->set_body(status, api_res); } } else if(request->http_method == "PUT") { std::string api_res; long status = HttpClient::put_response(url, request->body, api_res, res_headers, 0, true); response->content_type_header = res_headers["content-type"]; response->set_body(status, api_res); } else if(request->http_method == "DELETE") { std::string api_res; // timeout: 0 since delete can take a long time long status = HttpClient::delete_response(url, api_res, res_headers, 0, true); response->content_type_header = res_headers["content-type"]; response->set_body(status, api_res); } else if(request->http_method == "PATCH") { std::string api_res; route_path* rpath = nullptr; bool route_found = server->get_route(request->route_hash, &rpath); long status = HttpClient::patch_response(url, request->body, api_res, res_headers, 0, true); response->content_type_header = res_headers["content-type"]; response->set_body(status, api_res); } else { const std::string& err = "Forwarding for http method not 
implemented: " + request->http_method; LOG(ERROR) << err; response->set_500(err); } auto req_res = new async_req_res_t(request, response, true); message_dispatcher->send_message(HttpServer::STREAM_RESPONSE_MESSAGE, req_res); pending_writes--; }); } std::string ReplicationState::get_node_url_path(const std::string& node_addr, const std::string& path, const std::string& protocol) const { std::vector<std::string> addr_parts; StringUtils::split(node_addr, addr_parts, ":"); std::string leader_host_port = addr_parts[0] + ":" + addr_parts[2]; std::string url = protocol + "://" + leader_host_port + path; return url; } void ReplicationState::on_apply(braft::Iterator& iter) { //LOG(INFO) << "ReplicationState::on_apply"; // NOTE: this is executed on a different thread and runs concurrent to http thread // A batch of tasks are committed, which must be processed through // |iter| for (; iter.valid(); iter.next()) { // Guard invokes replication_arg->done->Run() asynchronously to avoid the callback blocking the main thread braft::AsyncClosureGuard closure_guard(iter.done()); //LOG(INFO) << "Apply entry"; const std::shared_ptr<http_req>& request_generated = iter.done() ? dynamic_cast<ReplicationClosure*>(iter.done())->get_request() : std::make_shared<http_req>(); //LOG(INFO) << "Post assignment " << request_generated.get() << ", use count: " << request_generated.use_count(); const std::shared_ptr<http_res>& response_generated = iter.done() ? dynamic_cast<ReplicationClosure*>(iter.done())->get_response() : std::make_shared<http_res>(nullptr); if(!iter.done()) { // indicates log serialized request request_generated->load_from_json(iter.data().to_string()); } request_generated->log_index = iter.index(); // To avoid blocking the serial Raft write thread persist the log entry in local storage. // Actual operations will be done in collection-sharded batch indexing threads. 
batched_indexer->enqueue(request_generated, response_generated); if(iter.done()) { pending_writes--; //LOG(INFO) << "pending_writes: " << pending_writes; } } } void ReplicationState::read(const std::shared_ptr<http_res>& response) { // NOT USED: // For consistency, reads to followers could be rejected. // Currently, we don't do implement reads via raft. } void* ReplicationState::save_snapshot(void* arg) { LOG(INFO) << "save_snapshot called"; SnapshotArg* sa = static_cast<SnapshotArg*>(arg); std::unique_ptr<SnapshotArg> arg_guard(sa); // add the db snapshot files to writer state butil::FileEnumerator dir_enum(butil::FilePath(sa->db_snapshot_path), false, butil::FileEnumerator::FILES); for (butil::FilePath file = dir_enum.Next(); !file.empty(); file = dir_enum.Next()) { std::string file_name = std::string(db_snapshot_name) + "/" + file.BaseName().value(); if (sa->writer->add_file(file_name) != 0) { sa->done->status().set_error(EIO, "Fail to add file to writer."); sa->replication_state->snapshot_in_progress = false; return nullptr; } } if(!sa->analytics_db_snapshot_path.empty()) { //add analytics db snapshot files to writer state butil::FileEnumerator analytics_dir_enum(butil::FilePath(sa->analytics_db_snapshot_path), false, butil::FileEnumerator::FILES); for (butil::FilePath file = analytics_dir_enum.Next(); !file.empty(); file = analytics_dir_enum.Next()) { auto file_name = std::string(analytics_db_snapshot_name) + "/" + file.BaseName().value(); if (sa->writer->add_file(file_name) != 0) { sa->done->status().set_error(EIO, "Fail to add analytics file to writer."); sa->replication_state->snapshot_in_progress = false; return nullptr; } } } const std::string& temp_snapshot_dir = sa->writer->get_path(); sa->done->Run(); // if an external snapshot is requested, copy latest snapshot directory into that if(!sa->ext_snapshot_path.empty()) { // temp directory will be moved to final snapshot directory, so let's wait for that to happen 
while(butil::DirectoryExists(butil::FilePath(temp_snapshot_dir))) { std::this_thread::sleep_for(std::chrono::milliseconds(100)); } LOG(INFO) << "Copying system snapshot to external snapshot directory at " << sa->ext_snapshot_path; const butil::FilePath& dest_state_dir = butil::FilePath(sa->ext_snapshot_path + "/state"); if(!butil::DirectoryExists(dest_state_dir)) { butil::CreateDirectory(dest_state_dir, true); } const butil::FilePath& src_snapshot_dir = butil::FilePath(sa->state_dir_path + "/snapshot"); const butil::FilePath& src_meta_dir = butil::FilePath(sa->state_dir_path + "/meta"); bool snapshot_copied = butil::CopyDirectory(src_snapshot_dir, dest_state_dir, true); bool meta_copied = butil::CopyDirectory(src_meta_dir, dest_state_dir, true); sa->replication_state->ext_snapshot_succeeded = snapshot_copied && meta_copied; } // notify on demand closure that external snapshotting is done sa->replication_state->notify(); // NOTE: *must* do a dummy write here since snapshots cannot be triggered if no write has happened since the // last snapshot. By doing a dummy write right after a snapshot, we ensure that this can never be the case. 
sa->replication_state->do_dummy_write(); sa->replication_state->snapshot_in_progress = false; LOG(INFO) << "save_snapshot done"; return nullptr; } // this method is serial to on_apply so guarantees a snapshot view of the state machine void ReplicationState::on_snapshot_save(braft::SnapshotWriter* writer, braft::Closure* done) { LOG(INFO) << "on_snapshot_save"; snapshot_in_progress = true; std::string db_snapshot_path = writer->get_path() + "/" + db_snapshot_name; std::string analytics_db_snapshot_path = writer->get_path() + "/" + analytics_db_snapshot_name; { // grab batch indexer lock so that we can take a clean snapshot std::shared_mutex& pause_mutex = batched_indexer->get_pause_mutex(); std::unique_lock lk(pause_mutex); nlohmann::json batch_index_state; batched_indexer->serialize_state(batch_index_state); store->insert(BATCHED_INDEXER_STATE_KEY, batch_index_state.dump()); // we will delete all the skip indices in meta store and flush that DB // this will block writes, but should be pretty fast batched_indexer->clear_skip_indices(); rocksdb::Checkpoint* checkpoint = nullptr; rocksdb::Status status = store->create_check_point(&checkpoint, db_snapshot_path); std::unique_ptr<rocksdb::Checkpoint> checkpoint_guard(checkpoint); if(!status.ok()) { LOG(ERROR) << "Failure during checkpoint creation, msg:" << status.ToString(); done->status().set_error(EIO, "Checkpoint creation failure."); } if(analytics_store) { // to ensure that in-memory table is sent to disk (we don't use WAL) analytics_store->flush(); rocksdb::Checkpoint* checkpoint2 = nullptr; status = analytics_store->create_check_point(&checkpoint2, analytics_db_snapshot_path); std::unique_ptr<rocksdb::Checkpoint> checkpoint_guard(checkpoint2); if(!status.ok()) { LOG(ERROR) << "AnalyticsStore : Failure during checkpoint creation, msg:" << status.ToString(); done->status().set_error(EIO, "AnalyticsStore : Checkpoint creation failure."); } } } SnapshotArg* arg = new SnapshotArg; arg->replication_state = this; 
arg->writer = writer; arg->state_dir_path = raft_dir_path; arg->db_snapshot_path = db_snapshot_path; arg->done = done; if(analytics_store) { arg->analytics_db_snapshot_path = analytics_db_snapshot_path; } if(!ext_snapshot_path.empty()) { arg->ext_snapshot_path = ext_snapshot_path; ext_snapshot_path = ""; } // Start a new bthread to avoid blocking StateMachine for slower operations that don't need a blocking view bthread_t tid; bthread_start_urgent(&tid, NULL, save_snapshot, arg); } int ReplicationState::init_db() { LOG(INFO) << "Loading collections from disk..."; Option<bool> init_op = CollectionManager::get_instance().load( num_collections_parallel_load, num_documents_parallel_load ); if(init_op.ok()) { LOG(INFO) << "Finished loading collections from disk."; } else { LOG(ERROR)<< "Typesense failed to start. " << "Could not load collections from disk: " << init_op.error(); return 1; } // important to init conversation models only after all collections have been loaded auto conversation_models_init = ConversationModelManager::init(store); if(!conversation_models_init.ok()) { LOG(INFO) << "Failed to initialize conversation model manager: " << conversation_models_init.error(); } else { LOG(INFO) << "Loaded " << conversation_models_init.get() << "conversation model(s)."; } if(batched_indexer != nullptr) { LOG(INFO) << "Initializing batched indexer from snapshot state..."; std::string batched_indexer_state_str; StoreStatus s = store->get(BATCHED_INDEXER_STATE_KEY, batched_indexer_state_str); if(s == FOUND) { nlohmann::json batch_indexer_state = nlohmann::json::parse(batched_indexer_state_str); batched_indexer->load_state(batch_indexer_state); } } auto personalization_models_init = PersonalizationModelManager::init(store); if(!personalization_models_init.ok()) { LOG(INFO) << "Failed to initialize personalization model manager: " << personalization_models_init.error(); } else { LOG(INFO) << "Loaded " << personalization_models_init.get() << " personalization model(s)."; } 
return 0; } int ReplicationState::on_snapshot_load(braft::SnapshotReader* reader) { std::shared_lock lock(node_mutex); CHECK(!node || !node->is_leader()) << "Leader is not supposed to load snapshot"; lock.unlock(); LOG(INFO) << "on_snapshot_load"; // ensures that reads and writes are rejected, as `store->reload()` unique locks the DB handle read_caught_up = false; write_caught_up = false; // Load snapshot from leader, replacing the running StateMachine std::string analytics_snapshot_path = reader->get_path(); analytics_snapshot_path.append(std::string("/") + analytics_db_snapshot_name); if(analytics_store && directory_exists(analytics_snapshot_path)) { // analytics db snapshot could be missing (older version or disabled earlier) int reload_store = analytics_store->reload(true, analytics_snapshot_path, Config::get_instance().get_analytics_db_ttl()); if (reload_store != 0) { LOG(ERROR) << "Failed to reload analytics db snapshot."; return reload_store; } } std::string db_snapshot_path = reader->get_path(); db_snapshot_path.append(std::string("/") + db_snapshot_name); int reload_store = store->reload(true, db_snapshot_path); if(reload_store != 0) { return reload_store; } bool init_db_status = init_db(); return init_db_status; } void ReplicationState::refresh_nodes(const std::string & nodes, const size_t raft_counter, const std::atomic<bool>& reset_peers_on_error) { std::shared_lock lock(node_mutex); if(!node) { LOG(WARNING) << "Node state is not initialized: unable to refresh nodes."; return ; } braft::Configuration new_conf; new_conf.parse_from(nodes); braft::NodeStatus nodeStatus; node->get_status(&nodeStatus); LOG(INFO) << "Term: " << nodeStatus.term << ", pending_queue: " << nodeStatus.pending_queue_size << ", last_index: " << nodeStatus.last_index << ", committed: " << nodeStatus.committed_index << ", known_applied: " << nodeStatus.known_applied_index << ", applying: " << nodeStatus.applying_index << ", pending_writes: " << pending_writes << ", queued_writes: " << 
batched_indexer->get_queued_writes() << ", local_sequence: " << store->get_latest_seq_number(); if(node->is_leader()) { RefreshNodesClosure* refresh_nodes_done = new RefreshNodesClosure; node->change_peers(new_conf, refresh_nodes_done); } else { if(node->leader_id().is_empty()) { // When node is not a leader, does not have a leader and is also a single-node cluster, // we forcefully reset its peers. // NOTE: `reset_peers()` is not a safe call to make as we give up on consistency and consensus guarantees. // We are doing this solely to handle single node cluster whose IP changes. // Examples: Docker container IP change, local DHCP leased IP change etc. std::vector<braft::PeerId> latest_nodes; new_conf.list_peers(&latest_nodes); if(latest_nodes.size() == 1 || (raft_counter > 0 && reset_peers_on_error)) { LOG(WARNING) << "Node with no leader. Resetting peers of size: " << latest_nodes.size(); node->reset_peers(new_conf); } else { LOG(WARNING) << "Multi-node with no leader: refusing to reset peers."; } return ; } } } void ReplicationState::refresh_catchup_status(bool log_msg) { std::shared_lock lock(node_mutex); if(node == nullptr ) { read_caught_up = write_caught_up = false; return ; } bool is_leader = node->is_leader(); bool leader_or_follower = (is_leader || !node->leader_id().is_empty()); if(!leader_or_follower) { read_caught_up = write_caught_up = false; return ; } braft::NodeStatus n_status; node->get_status(&n_status); lock.unlock(); // `known_applied_index` guaranteed to be atleast 1 if raft log is available (after snapshot loading etc.) if(n_status.known_applied_index == 0) { LOG_IF(ERROR, log_msg) << "Node not ready yet (known_applied_index is 0)."; read_caught_up = write_caught_up = false; return ; } // work around for: https://github.com/baidu/braft/issues/277#issuecomment-823080171 int64_t current_index = (n_status.applying_index == 0) ? 
n_status.known_applied_index : n_status.applying_index; int64_t apply_lag = n_status.last_index - current_index; // in addition to raft level lag, we should also account for internal batched write queue int64_t num_queued_writes = batched_indexer->get_queued_writes(); //LOG(INFO) << "last_index: " << n_status.applying_index << ", known_applied_index: " << n_status.known_applied_index; //LOG(INFO) << "apply_lag: " << apply_lag; int healthy_read_lag = config->get_healthy_read_lag(); int healthy_write_lag = config->get_healthy_write_lag(); if (apply_lag > healthy_read_lag) { LOG_IF(ERROR, log_msg) << apply_lag << " lagging entries > healthy read lag of " << healthy_read_lag; this->read_caught_up = false; } else { if(num_queued_writes > healthy_read_lag) { LOG_IF(ERROR, log_msg) << num_queued_writes << " queued writes > healthy read lag of " << healthy_read_lag; this->read_caught_up = false; } else { this->read_caught_up = true; } } if (apply_lag > healthy_write_lag) { LOG_IF(ERROR, log_msg) << apply_lag << " lagging entries > healthy write lag of " << healthy_write_lag; this->write_caught_up = false; } else { if(num_queued_writes > healthy_write_lag) { LOG_IF(ERROR, log_msg) << num_queued_writes << " queued writes > healthy write lag of " << healthy_write_lag; this->write_caught_up = false; } else { this->write_caught_up = true; } } if(is_leader || !this->read_caught_up) { // no need to re-check status with leader return ; } lock.lock(); if(node->leader_id().is_empty()) { LOG(ERROR) << "Could not get leader status, as node does not have a leader!"; return ; } const std::string & leader_addr = node->leader_id().to_string(); lock.unlock(); const std::string protocol = api_uses_ssl ? 
"https" : "http"; std::string url = get_node_url_path(leader_addr, "/status", protocol); std::string api_res; std::map<std::string, std::string> res_headers; long status_code = HttpClient::get_response(url, api_res, res_headers, {}, 5*1000, true); if(status_code == 200) { // compare leader's applied log with local applied to see if we are lagging nlohmann::json leader_status = nlohmann::json::parse(api_res); if(leader_status.contains("committed_index")) { int64_t leader_committed_index = leader_status["committed_index"].get<int64_t>(); if(leader_committed_index <= n_status.committed_index) { // this can happen due to network latency in making the /status call // we will refrain from changing current status return ; } this->read_caught_up = ((leader_committed_index - n_status.committed_index) < healthy_read_lag); } else { // we will refrain from changing current status LOG(ERROR) << "Error, `committed_index` key not found in /status response from leader."; } } else { // we will again refrain from changing current status LOG(ERROR) << "Error, /status end-point returned bad status code " << status_code; } } ReplicationState::ReplicationState(HttpServer* server, BatchedIndexer* batched_indexer, Store *store, Store* analytics_store, ThreadPool* thread_pool, http_message_dispatcher *message_dispatcher, bool api_uses_ssl, const Config* config, size_t num_collections_parallel_load, size_t num_documents_parallel_load): node(nullptr), leader_term(-1), server(server), batched_indexer(batched_indexer), store(store), analytics_store(analytics_store), thread_pool(thread_pool), message_dispatcher(message_dispatcher), api_uses_ssl(api_uses_ssl), config(config), num_collections_parallel_load(num_collections_parallel_load), num_documents_parallel_load(num_documents_parallel_load), read_caught_up(false), write_caught_up(false), ready(false), shutting_down(false), pending_writes(0), snapshot_in_progress(false), last_snapshot_ts(std::time(nullptr)), 
snapshot_interval_s(config->get_snapshot_interval_seconds()) { } bool ReplicationState::is_alive() const { // for general health check we will only care about the `read_caught_up` threshold return read_caught_up; } uint64_t ReplicationState::node_state() const { std::shared_lock lock(node_mutex); if(node == nullptr) { return 0; } braft::NodeStatus node_status; node->get_status(&node_status); return node_status.state; } void ReplicationState::do_snapshot(const std::string& snapshot_path, const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) { if(node == nullptr) { res->set_500("Could not trigger a snapshot, as node is not initialized."); auto req_res = new async_req_res_t(req, res, true); get_message_dispatcher()->send_message(HttpServer::STREAM_RESPONSE_MESSAGE, req_res); return ; } if(snapshot_in_progress) { res->set_409("Another snapshot is in progress."); auto req_res = new async_req_res_t(req, res, true); get_message_dispatcher()->send_message(HttpServer::STREAM_RESPONSE_MESSAGE, req_res); return ; } LOG(INFO) << "Triggering an on demand snapshot" << (!snapshot_path.empty() ? " with external snapshot path..." : "..."); thread_pool->enqueue([&snapshot_path, req, res, this]() { OnDemandSnapshotClosure* snapshot_closure = new OnDemandSnapshotClosure(this, req, res); ext_snapshot_path = snapshot_path; std::shared_lock lock(this->node_mutex); node->snapshot(snapshot_closure); }); } void ReplicationState::set_ext_snapshot_path(const std::string& snapshot_path) { this->ext_snapshot_path = snapshot_path; } const std::string &ReplicationState::get_ext_snapshot_path() const { return ext_snapshot_path; } void ReplicationState::do_dummy_write() { std::shared_lock lock(node_mutex); if(!node || node->leader_id().is_empty()) { LOG(ERROR) << "Could not do a dummy write, as node does not have a leader"; return ; } const std::string & leader_addr = node->leader_id().to_string(); lock.unlock(); const std::string protocol = api_uses_ssl ? 
"https" : "http"; std::string url = get_node_url_path(leader_addr, "/health", protocol); std::string api_res; std::map<std::string, std::string> res_headers; long status_code = HttpClient::post_response(url, "", api_res, res_headers, {}, 4000, true); LOG(INFO) << "Dummy write to " << url << ", status = " << status_code << ", response = " << api_res; } bool ReplicationState::trigger_vote() { std::shared_lock lock(node_mutex); if(node) { auto status = node->vote(election_timeout_interval_ms); LOG(INFO) << "Triggered vote. Ok? " << status.ok() << ", status: " << status; return status.ok(); } return false; } bool ReplicationState::reset_peers() { std::shared_lock lock(node_mutex); if(node) { const Option<std::string> & refreshed_nodes_op = Config::fetch_nodes_config(config->get_nodes()); if(!refreshed_nodes_op.ok()) { LOG(WARNING) << "Error while fetching peer configuration: " << refreshed_nodes_op.error(); return false; } const std::string& nodes_config = ReplicationState::to_nodes_config(peering_endpoint, Config::get_instance().get_api_port(), refreshed_nodes_op.get()); braft::Configuration peer_config; peer_config.parse_from(nodes_config); std::vector<braft::PeerId> peers; peer_config.list_peers(&peers); auto status = node->reset_peers(peer_config); LOG(INFO) << "Reset peers. Ok? 
" << status.ok() << ", status: " << status; LOG(INFO) << "New peer config is: " << peer_config; return status.ok(); } return false; } http_message_dispatcher* ReplicationState::get_message_dispatcher() const { return message_dispatcher; } Store* ReplicationState::get_store() { return store; } void ReplicationState::shutdown() { LOG(INFO) << "Set shutting_down = true"; shutting_down = true; // wait for pending writes to drop to zero LOG(INFO) << "Waiting for in-flight writes to finish..."; while(pending_writes.load() != 0) { LOG(INFO) << "pending_writes: " << pending_writes; std::this_thread::sleep_for(std::chrono::milliseconds(1000)); } LOG(INFO) << "Replication state shutdown, store sequence: " << store->get_latest_seq_number(); std::unique_lock lock(node_mutex); if (node) { LOG(INFO) << "node->shutdown"; node->shutdown(nullptr); // Blocking this thread until the node is eventually down. LOG(INFO) << "node->join"; node->join(); delete node; node = nullptr; } } void ReplicationState::persist_applying_index() { std::shared_lock lock(node_mutex); if(node == nullptr) { return ; } lock.unlock(); batched_indexer->persist_applying_index(); } int64_t ReplicationState::get_num_queued_writes() { return batched_indexer->get_queued_writes(); } bool ReplicationState::is_leader() { std::shared_lock lock(node_mutex); if(!node) { return false; } return node->is_leader(); } nlohmann::json ReplicationState::get_status() { nlohmann::json status; std::shared_lock lock(node_mutex); if(!node) { // `node` is not yet initialized (probably loading snapshot) status["state"] = "NOT_READY"; status["committed_index"] = 0; status["queued_writes"] = 0; return status; } braft::NodeStatus node_status; node->get_status(&node_status); lock.unlock(); status["state"] = braft::state2str(node_status.state); status["committed_index"] = node_status.committed_index; status["queued_writes"] = batched_indexer->get_queued_writes(); return status; } void ReplicationState::do_snapshot(const std::string& nodes) 
{ auto current_ts = std::time(nullptr); if(current_ts - last_snapshot_ts < snapshot_interval_s) { //LOG(INFO) << "Skipping snapshot: not enough time has elapsed."; return; } LOG(INFO) << "Snapshot timer is active, current_ts: " << current_ts << ", last_snapshot_ts: " << last_snapshot_ts; if(is_leader()) { // run the snapshot only if there are no other recovering followers std::vector<braft::PeerId> peers; braft::Configuration peer_config; peer_config.parse_from(nodes); peer_config.list_peers(&peers); std::shared_lock lock(node_mutex); std::string my_addr = node->node_id().peer_id.to_string(); lock.unlock(); //LOG(INFO) << "my_addr: " << my_addr; bool all_peers_healthy = true; // iterate peers and check health status for(const auto& peer: peers) { const std::string& peer_addr = peer.to_string(); //LOG(INFO) << "do_snapshot, peer_addr: " << peer_addr; if(my_addr == peer_addr) { // skip self //LOG(INFO) << "do_snapshot: skipping self, peer_addr: " << peer_addr; continue; } const std::string protocol = api_uses_ssl ? 
"https" : "http"; std::string url = get_node_url_path(peer_addr, "/health", protocol); std::string api_res; std::map<std::string, std::string> res_headers; long status_code = HttpClient::get_response(url, api_res, res_headers, {}, 5*1000, true); bool peer_healthy = (status_code == 200); //LOG(INFO) << "do_snapshot, status_code: " << status_code; if(!peer_healthy) { LOG(WARNING) << "Peer " << peer_addr << " reported unhealthy during snapshot pre-check."; } all_peers_healthy = all_peers_healthy && peer_healthy; } if(!all_peers_healthy) { LOG(WARNING) << "Unable to trigger snapshot as one or more of the peers reported unhealthy."; return ; } } TimedSnapshotClosure* snapshot_closure = new TimedSnapshotClosure(this); std::shared_lock lock(node_mutex); node->snapshot(snapshot_closure); last_snapshot_ts = current_ts; } bool ReplicationState::get_ext_snapshot_succeeded() { return ext_snapshot_succeeded; } std::string ReplicationState::get_leader_url() const { std::shared_lock lock(node_mutex); if(!node) { LOG(ERROR) << "Could not get leader url as node is not initialized!"; return ""; } if(node->leader_id().is_empty()) { LOG(ERROR) << "Could not get leader url, as node does not have a leader!"; return ""; } const std::string & leader_addr = node->leader_id().to_string(); lock.unlock(); const std::string protocol = api_uses_ssl ? 
"https" : "http"; return get_node_url_path(leader_addr, "/", protocol); } void ReplicationState::decr_pending_writes() { pending_writes--; } void TimedSnapshotClosure::Run() { // Auto delete this after Done() std::unique_ptr<TimedSnapshotClosure> self_guard(this); if(status().ok()) { LOG(INFO) << "Timed snapshot succeeded!"; } else { LOG(ERROR) << "Timed snapshot failed, error: " << status().error_str() << ", code: " << status().error_code(); } } void OnDemandSnapshotClosure::Run() { // Auto delete this after Done() std::unique_ptr<OnDemandSnapshotClosure> self_guard(this); replication_state->wait(); // until on demand snapshotting completes auto ext_snapshot_path = replication_state->get_ext_snapshot_path(); replication_state->set_ext_snapshot_path(""); req->last_chunk_aggregate = true; res->final = true; nlohmann::json response; uint32_t status_code; if(status().ok() && (ext_snapshot_path.empty() || replication_state->get_ext_snapshot_succeeded())) { LOG(INFO) << "On demand snapshot succeeded!"; status_code = 201; response["success"] = true; } else { LOG(ERROR) << "On demand snapshot failed, error: "; if(replication_state->get_ext_snapshot_succeeded()) { LOG(ERROR) << status().error_str() << ", code: " << status().error_code(); } else { LOG(ERROR) << "Copy failed."; } status_code = 500; response["success"] = false; response["error"] = status().error_str(); } res->status_code = status_code; res->body = response.dump(); auto req_res = new async_req_res_t(req, res, true); replication_state->get_message_dispatcher()->send_message(HttpServer::STREAM_RESPONSE_MESSAGE, req_res); // wait for response to be sent res->wait(); }
46,578
C++
.cpp
962
40.037422
125
0.624162
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,813
event_manager.cpp
typesense_typesense/src/event_manager.cpp
#include <analytics_manager.h>
#include "event_manager.h"

// Validates an incoming analytics event payload and forwards it to the
// AnalyticsManager singleton.
//
// @param event     JSON payload; must contain string `type`, string `name`
//                  and an object `data` whose required fields depend on the
//                  event type (search events need `user_id` + `q`; other
//                  events need `doc_id` + `user_id`, with optional
//                  `collection` / `q` strings).
// @param client_ip origin IP, recorded alongside the event.
// @return Option<bool>(true) on success; otherwise an error Option carrying
//         an HTTP-style status code (400/404 for validation failures, or the
//         code propagated from AnalyticsManager::add_event).
Option<bool> EventManager::add_event(const nlohmann::json& event, const std::string& client_ip) {
    /*
        Sample event payload:

        {
            "type": "search",
            "data": {
                "q": "Nike shoes",
                "collections": ["products"]
            }
        }
    */

    // use the EVENT_TYPE constant instead of a hard-coded "type" literal so the
    // contains() check and the event[EVENT_TYPE] access below stay in sync
    if(!event.contains(EVENT_TYPE)) {
        return Option<bool>(404, "key `type` not found.");
    }

    const auto& event_type_val = event[EVENT_TYPE];

    if(event_type_val.is_string()) {
        const std::string& event_type = event_type_val.get<std::string>();
        if(event_type == AnalyticsManager::CLICK_EVENT || event_type == AnalyticsManager::CONVERSION_EVENT
           || event_type == AnalyticsManager::VISIT_EVENT || event_type == AnalyticsManager::CUSTOM_EVENT
           || event_type == AnalyticsManager::SEARCH_EVENT) {
            if(!event.contains(EVENT_DATA)) {
                return Option<bool>(404, "key `data` not found.");
            }

            const auto& event_data_val = event[EVENT_DATA];

            if(!event.contains(EVENT_NAME)) {
                return Option<bool>(404, "key `name` not found.");
            }
            const auto& event_name = event[EVENT_NAME];

            if(!event_data_val.is_object()) {
                return Option<bool>(400, "data is not object.");
            }

            if(event_type == AnalyticsManager::SEARCH_EVENT) {
                // search events carry the query string instead of a document id
                if(!event_data_val.contains("user_id") || !event_data_val["user_id"].is_string()) {
                    return Option<bool>(400, "search event json data fields should contain `user_id` as string value.");
                }

                if(!event_data_val.contains("q") || !event_data_val["q"].is_string()) {
                    return Option<bool>(400, "search event json data fields should contain `q` as string value.");
                }
            } else {
                // click / conversion / visit / custom events
                if(!event_data_val.contains("doc_id") || !event_data_val["doc_id"].is_string()) {
                    return Option<bool>(400, "event should have 'doc_id' as string value.");
                }

                if(event_data_val.contains("collection") && !event_data_val["collection"].is_string()) {
                    return Option<bool>(400, "'collection' should be a string value.");
                }

                if(!event_data_val.contains("user_id") || !event_data_val["user_id"].is_string()) {
                    return Option<bool>(400, "event should have 'user_id' as string value.");
                }

                if(event_data_val.contains("q") && !event_data_val["q"].is_string()) {
                    return Option<bool>(400, "'q' should be a string value.");
                }
            }

            auto op = AnalyticsManager::get_instance().add_event(client_ip, event_type, event_name, event_data_val);
            if(!op.ok()) {
                return Option<bool>(op.code(), op.error());
            }
        } else {
            return Option<bool>(404, "event_type " + event_type + " not found.");
        }
    } else {
        return Option<bool>(400, "`event_type` value should be string.");
    }

    // explicit template argument for consistency with every other return above
    return Option<bool>(true);
}
3,315
C++
.cpp
68
35.264706
116
0.520421
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,814
tsconfig.cpp
typesense_typesense/src/tsconfig.cpp
#include "option.h" #include "json.hpp" #include "tsconfig.h" #include "file_utils.h" #include <fstream> Option<bool> Config::update_config(const nlohmann::json& req_json) { bool found_config = false; if(req_json.count("log-slow-requests-time-ms") != 0) { if(!req_json["log-slow-requests-time-ms"].is_number_integer()) { return Option<bool>(400, "Configuration `log-slow-requests-time-ms` must be an integer."); } set_log_slow_requests_time_ms(req_json["log-slow-requests-time-ms"].get<int>()); found_config = true; } if(req_json.count("log-slow-searches-time-ms") != 0) { if(!req_json["log-slow-searches-time-ms"].is_number_integer()) { return Option<bool>(400, "Configuration `log-slow-searches-time-ms` must be an integer."); } set_log_slow_searches_time_ms(req_json["log-slow-searches-time-ms"].get<int>()); found_config = true; } if(req_json.count("enable-search-logging") != 0) { if(!req_json["enable-search-logging"].is_boolean()) { return Option<bool>(400, "Configuration `enable-search-logging` must be a boolean."); } set_enable_search_logging(req_json["enable-search-logging"].get<bool>()); found_config = true; } if(req_json.count("healthy-read-lag") != 0) { if(!req_json["healthy-read-lag"].is_number_integer()) { return Option<bool>(400, "Configuration `healthy-read-lag` must be a positive integer."); } int read_lag = req_json["healthy-read-lag"].get<int>(); if(read_lag <= 0) { return Option<bool>(400, "Configuration `healthy-read-lag` must be a positive integer."); } set_healthy_read_lag(read_lag); found_config = true; } if(req_json.count("healthy-write-lag") != 0) { if(!req_json["healthy-write-lag"].is_number_integer()) { return Option<bool>(400, "Configuration `healthy-write-lag` must be an integer."); } int write_lag = req_json["healthy-write-lag"].get<int>(); if(write_lag <= 0) { return Option<bool>(400, "Configuration `healthy-write-lag` must be a positive integer."); } set_healthy_write_lag(write_lag); found_config = true; } if(req_json.count("cache-num-entries") 
!= 0) { if(!req_json["cache-num-entries"].is_number_integer()) { return Option<bool>(400, "Configuration `cache-num-entries` must be an integer."); } int cache_entries_num = req_json["cache-num-entries"].get<int>(); if(cache_entries_num <= 0) { return Option<bool>(400, "Configuration `cache-num-entries` must be a positive integer."); } set_cache_num_entries(cache_entries_num); found_config = true; } if(req_json.count("skip-writes") != 0) { if(!req_json["skip-writes"].is_boolean()) { return Option<bool>(400, ("Configuration `skip-writes` must be a boolean.")); } bool skip_writes = req_json["skip-writes"].get<bool>(); set_skip_writes(skip_writes); found_config = true; } return Option<bool>(true); } Option<std::string> Config::fetch_file_contents(const std::string & file_path) { if(!file_exists(file_path)) { return Option<std::string>(404, std::string("File does not exist at: ") + file_path); } std::ifstream infile(file_path); std::string content((std::istreambuf_iterator<char>(infile)), (std::istreambuf_iterator<char>())); infile.close(); if(content.empty()) { return Option<std::string>(400, std::string("Empty file at: ") + file_path); } return Option<std::string>(content); } Option<std::string> Config::fetch_nodes_config(const std::string& path_to_nodes) { std::string nodes_config; if(!path_to_nodes.empty()) { const Option<std::string> & nodes_op = fetch_file_contents(path_to_nodes); if(!nodes_op.ok()) { return Option<std::string>(500, "Error reading file containing nodes configuration: " + nodes_op.error()); } else { nodes_config = nodes_op.get(); if(nodes_config.empty()) { return Option<std::string>(500, "File containing nodes configuration is empty."); } else { nodes_config = nodes_op.get(); } } } return Option<std::string>(nodes_config); } void Config::load_config_env() { this->data_dir = get_env("TYPESENSE_DATA_DIR"); this->log_dir = get_env("TYPESENSE_LOG_DIR"); this->api_key = get_env("TYPESENSE_API_KEY"); // @deprecated this->search_only_api_key = 
get_env("TYPESENSE_SEARCH_ONLY_API_KEY"); this->health_rusage_api_key = get_env("TYPESENSE_HEALTH_RUSAGE_API_KEY"); if(!get_env("TYPESENSE_LISTEN_ADDRESS").empty()) { this->api_address = get_env("TYPESENSE_LISTEN_ADDRESS"); } if(!get_env("TYPESENSE_LISTEN_PORT").empty()) { this->api_port = std::stoi(get_env("TYPESENSE_LISTEN_PORT")); } if(!get_env("TYPESENSE_API_ADDRESS").empty()) { this->api_address = get_env("TYPESENSE_API_ADDRESS"); } if(!get_env("TYPESENSE_API_PORT").empty()) { this->api_port = std::stoi(get_env("TYPESENSE_API_PORT")); } if(!get_env("TYPESENSE_PEERING_ADDRESS").empty()) { this->peering_address = get_env("TYPESENSE_PEERING_ADDRESS"); } if(!get_env("TYPESENSE_PEERING_PORT").empty()) { this->peering_port = std::stoi(get_env("TYPESENSE_PEERING_PORT")); } if(!get_env("TYPESENSE_PEERING_SUBNET").empty()) { this->peering_subnet = get_env("TYPESENSE_PEERING_SUBNET"); } this->nodes = get_env("TYPESENSE_NODES"); this->master = get_env("TYPESENSE_MASTER"); this->ssl_certificate = get_env("TYPESENSE_SSL_CERTIFICATE"); this->ssl_certificate_key = get_env("TYPESENSE_SSL_CERTIFICATE_KEY"); std::string enable_cors_str = get_env("TYPESENSE_ENABLE_CORS"); StringUtils::toupper(enable_cors_str); this->enable_cors = ("TRUE" == enable_cors_str || enable_cors_str.empty()) ? 
true : false; std::string cors_domains_value = get_env("TYPESENSE_CORS_DOMAINS"); set_cors_domains(cors_domains_value); if(!get_env("TYPESENSE_MAX_MEMORY_RATIO").empty()) { this->max_memory_ratio = std::stof(get_env("TYPESENSE_MAX_MEMORY_RATIO")); } if(!get_env("TYPESENSE_SNAPSHOT_INTERVAL_SECONDS").empty()) { this->snapshot_interval_seconds = std::stoi(get_env("TYPESENSE_SNAPSHOT_INTERVAL_SECONDS")); } if(!get_env("TYPESENSE_HEALTHY_READ_LAG").empty()) { this->healthy_read_lag = std::stoi(get_env("TYPESENSE_HEALTHY_READ_LAG")); } if(!get_env("TYPESENSE_HEALTHY_WRITE_LAG").empty()) { this->healthy_write_lag = std::stoi(get_env("TYPESENSE_HEALTHY_WRITE_LAG")); } if(!get_env("TYPESENSE_LOG_SLOW_REQUESTS_TIME_MS").empty()) { this->log_slow_requests_time_ms = std::stoi(get_env("TYPESENSE_LOG_SLOW_REQUESTS_TIME_MS")); } if(!get_env("TYPESENSE_LOG_SLOW_SEARCHES_TIME_MS").empty()) { this->log_slow_searches_time_ms = std::stoi(get_env("TYPESENSE_LOG_SLOW_SEARCHES_TIME_MS")); } if(!get_env("TYPESENSE_NUM_COLLECTIONS_PARALLEL_LOAD").empty()) { this->num_collections_parallel_load = std::stoi(get_env("TYPESENSE_NUM_COLLECTIONS_PARALLEL_LOAD")); } if(!get_env("TYPESENSE_NUM_DOCUMENTS_PARALLEL_LOAD").empty()) { this->num_documents_parallel_load = std::stoi(get_env("TYPESENSE_NUM_DOCUMENTS_PARALLEL_LOAD")); } if(!get_env("TYPESENSE_CACHE_NUM_ENTRIES").empty()) { this->cache_num_entries = std::stoi(get_env("TYPESENSE_CACHE_NUM_ENTRIES")); } if(!get_env("TYPESENSE_ANALYTICS_FLUSH_INTERVAL").empty()) { this->analytics_flush_interval = std::stoi(get_env("TYPESENSE_ANALYTICS_FLUSH_INTERVAL")); } if(!get_env("TYPESENSE_HOUSEKEEPING_INTERVAL").empty()) { this->housekeeping_interval = std::stoi(get_env("TYPESENSE_HOUSEKEEPING_INTERVAL")); } if(!get_env("TYPESENSE_DB_COMPACTION_INTERVAL").empty()) { this->db_compaction_interval = std::stoi(get_env("TYPESENSE_DB_COMPACTION_INTERVAL")); } if(!get_env("TYPESENSE_THREAD_POOL_SIZE").empty()) { this->thread_pool_size = 
std::stoi(get_env("TYPESENSE_THREAD_POOL_SIZE")); } if(!get_env("TYPESENSE_SSL_REFRESH_INTERVAL_SECONDS").empty()) { this->ssl_refresh_interval_seconds = std::stoi(get_env("TYPESENSE_SSL_REFRESH_INTERVAL_SECONDS")); } if(!get_env("TYPESENSE_SNAPSHOT_MAX_BYTE_COUNT_PER_RPC").empty()) { this->snapshot_max_byte_count_per_rpc = std::stoi(get_env("TYPESENSE_SNAPSHOT_MAX_BYTE_COUNT_PER_RPC")); } this->enable_access_logging = ("TRUE" == get_env("TYPESENSE_ENABLE_ACCESS_LOGGING")); this->enable_search_analytics = ("TRUE" == get_env("TYPESENSE_ENABLE_SEARCH_ANALYTICS")); this->enable_search_logging = ("TRUE" == get_env("TYPESENSE_ENABLE_SEARCH_LOGGING")); if(!get_env("TYPESENSE_DISK_USED_MAX_PERCENTAGE").empty()) { this->disk_used_max_percentage = std::stoi(get_env("TYPESENSE_DISK_USED_MAX_PERCENTAGE")); } if(!get_env("TYPESENSE_MEMORY_USED_MAX_PERCENTAGE").empty()) { this->memory_used_max_percentage = std::stoi(get_env("TYPESENSE_MEMORY_USED_MAX_PERCENTAGE")); } if(!get_env("TYPESENSE_FILTER_BY_MAX_OPS").empty()) { this->filter_by_max_ops = std::stoi(get_env("TYPESENSE_FILTER_BY_MAX_OPS")); } this->skip_writes = ("TRUE" == get_env("TYPESENSE_SKIP_WRITES")); this->enable_lazy_filter = ("TRUE" == get_env("TYPESENSE_ENABLE_LAZY_FILTER")); this->reset_peers_on_error = ("TRUE" == get_env("TYPESENSE_RESET_PEERS_ON_ERROR")); if(!get_env("TYPESENSE_MAX_PER_PAGE").empty()) { this->max_per_page = std::stoi(get_env("TYPESENSE_MAX_PER_PAGE")); } if(!get_env("TYPESENSE_ANALYTICS_DIR").empty()) { this->analytics_dir = get_env("TYPESENSE_ANALYTICS_DIR"); } if(!get_env("TYPESENSE_ANALYTICS_DB_TTL").empty()) { this->analytics_db_ttl = std::stoi(get_env("TYPESENSE_ANALYTICS_DB_TTL")); } if(!get_env("TYPESENSE_ANALYTICS_MINUTE_RATE_LIMIT").empty()) { this->analytics_minute_rate_limit = std::stoi(get_env("TYPESENSE_ANALYTICS_MINUTE_RATE_LIMIT")); } } void Config::load_config_file(cmdline::parser& options) { this->config_file = options.exist("config") ? 
options.get<std::string>("config") : ""; if(!options.exist("config")) { config_file_validity = 0; return; } this->config_file = options.get<std::string>("config"); INIReader reader(this->config_file); if (reader.ParseError() != 0) { LOG(ERROR) << "Error while parsing config file, code = " << reader.ParseError(); config_file_validity = -1; return ; } config_file_validity = 1; if(reader.Exists("server", "data-dir")) { this->data_dir = reader.Get("server", "data-dir", ""); } if(reader.Exists("server", "log-dir")) { this->log_dir = reader.Get("server", "log-dir", ""); } if(reader.Exists("server", "analytics-dir")) { this->analytics_dir = reader.Get("server", "analytics-dir", ""); } if(reader.Exists("server", "analytics-db-ttl")) { this->analytics_db_ttl = reader.GetInteger("server", "analytics-db-ttl", 0); } if(reader.Exists("server", "analytics-minute-rate-limit")) { this->analytics_minute_rate_limit = reader.GetInteger("server", "analytics-minute-rate-limit", 5); } if(reader.Exists("server", "api-key")) { this->api_key = reader.Get("server", "api-key", ""); } // @deprecated if(reader.Exists("server", "search-only-api-key")) { this->search_only_api_key = reader.Get("server", "search-only-api-key", ""); } if(reader.Exists("server", "health-rusage-api-key")) { this->health_rusage_api_key = reader.Get("server", "health-rusage-api-key", ""); } if(reader.Exists("server", "listen-address")) { this->api_address = reader.Get("server", "listen-address", ""); } if(reader.Exists("server", "api-address")) { this->api_address = reader.Get("server", "api-address", ""); } if(reader.Exists("server", "master")) { this->master = reader.Get("server", "master", ""); } if(reader.Exists("server", "ssl-certificate")) { this->ssl_certificate = reader.Get("server", "ssl-certificate", ""); } if(reader.Exists("server", "ssl-certificate-key")) { this->ssl_certificate_key = reader.Get("server", "ssl-certificate-key", ""); } if(reader.Exists("server", "listen-port")) { this->api_port = 
reader.GetInteger("server", "listen-port", 8108); } if(reader.Exists("server", "api-port")) { this->api_port = reader.GetInteger("server", "api-port", 8108); } if(reader.Exists("server", "enable-cors")) { auto enable_cors_value = reader.Get("server", "enable-cors", "true"); StringUtils::tolowercase(enable_cors_value); this->enable_cors = enable_cors_value == "true"; } if(reader.Exists("server", "cors-domains")) { std::string cors_value = reader.Get("server", "cors-domains", ""); set_cors_domains(cors_value); } if(reader.Exists("server", "peering-address")) { this->peering_address = reader.Get("server", "peering-address", ""); } if(reader.Exists("server", "peering-port")) { this->peering_port = reader.GetInteger("server", "peering-port", 8107); } if(reader.Exists("server", "peering-subnet")) { this->peering_subnet = reader.Get("server", "peering-subnet", ""); } if(reader.Exists("server", "nodes")) { this->nodes = reader.Get("server", "nodes", ""); } if(reader.Exists("server", "max-memory-ratio")) { this->max_memory_ratio = (float) reader.GetReal("server", "max-memory-ratio", 1.0f); } if(reader.Exists("server", "snapshot-interval-seconds")) { this->snapshot_interval_seconds = (int) reader.GetInteger("server", "snapshot-interval-seconds", 3600); } if(reader.Exists("server", "snapshot-max-byte-count-per-rpc")) { this->snapshot_max_byte_count_per_rpc = (int) reader.GetInteger("server", "snapshot-max-byte-count-per-rpc", 4194304); } if(reader.Exists("server", "healthy-read-lag")) { this->healthy_read_lag = (size_t) reader.GetInteger("server", "healthy-read-lag", 1000); } if(reader.Exists("server", "healthy-write-lag")) { this->healthy_write_lag = (size_t) reader.GetInteger("server", "healthy-write-lag", 100); } if(reader.Exists("server", "log-slow-requests-time-ms")) { this->log_slow_requests_time_ms = (int) reader.GetInteger("server", "log-slow-requests-time-ms", -1); } if(reader.Exists("server", "log-slow-searches-time-ms")) { this->log_slow_searches_time_ms = (int) 
reader.GetInteger("server", "log-slow-searches-time-ms", 30*1000); } if(reader.Exists("server", "num-collections-parallel-load")) { this->num_collections_parallel_load = (int) reader.GetInteger("server", "num-collections-parallel-load", 0); } if(reader.Exists("server", "num-documents-parallel-load")) { this->num_documents_parallel_load = (int) reader.GetInteger("server", "num-documents-parallel-load", 1000); } if(reader.Exists("server", "cache-num-entries")) { this->cache_num_entries = (int) reader.GetInteger("server", "cache-num-entries", 1000); } if(reader.Exists("server", "analytics-flush-interval")) { this->analytics_flush_interval = (int) reader.GetInteger("server", "analytics-flush-interval", 3600); } if(reader.Exists("server", "housekeeping-interval")) { this->housekeeping_interval = (int) reader.GetInteger("server", "housekeeping-interval", 1800); } if(reader.Exists("server", "db-compaction-interval")) { this->db_compaction_interval = (int) reader.GetInteger("server", "db-compaction-interval", 0); } if(reader.Exists("server", "thread-pool-size")) { this->thread_pool_size = (int) reader.GetInteger("server", "thread-pool-size", 0); } if(reader.Exists("server", "ssl-refresh-interval-seconds")) { this->ssl_refresh_interval_seconds = (int) reader.GetInteger("server", "ssl-refresh-interval-seconds", 8 * 60 * 60); } if(reader.Exists("server", "enable-access-logging")) { auto enable_access_logging_str = reader.Get("server", "enable-access-logging", "false"); this->enable_access_logging = (enable_access_logging_str == "true"); } if(reader.Exists("server", "enable-search-analytics")) { auto enable_search_analytics_str = reader.Get("server", "enable-search-analytics", "false"); this->enable_search_analytics = (enable_search_analytics_str == "true"); } if(reader.Exists("server", "enable-search-logging")) { auto enable_search_logging_str = reader.Get("server", "enable-search-logging", "false"); this->enable_search_logging = (enable_search_logging_str == "true"); } 
if(reader.Exists("server", "disk-used-max-percentage")) { this->disk_used_max_percentage = (int) reader.GetInteger("server", "disk-used-max-percentage", 100); } if(reader.Exists("server", "memory-used-max-percentage")) { this->memory_used_max_percentage = (int) reader.GetInteger("server", "memory-used-max-percentage", 100); } if(reader.Exists("server", "enable-lazy-filter")) { auto enable_lazy_filter_str = reader.Get("server", "enable-lazy-filter", "false"); this->enable_lazy_filter = (enable_lazy_filter_str == "true"); } if(reader.Exists("server", "skip-writes")) { auto skip_writes_str = reader.Get("server", "skip-writes", "false"); this->skip_writes = (skip_writes_str == "true"); } if(reader.Exists("server", "reset-peers-on-error")) { auto reset_peers_on_error_str = reader.Get("server", "reset-peers-on-error", "false"); this->reset_peers_on_error = (reset_peers_on_error_str == "true"); } if(reader.Exists("server", "max-per-page")) { this->max_per_page = reader.GetInteger("server", "max-per-page", 250); } if(reader.Exists("server", "filter-by-max-ops")) { this->filter_by_max_ops = (uint16_t) reader.GetInteger("server", "filter-by-max-ops", FILTER_BY_DEFAULT_OPERATIONS); } } void Config::load_config_cmd_args(cmdline::parser& options) { if(options.exist("data-dir")) { this->data_dir = options.get<std::string>("data-dir"); } if(options.exist("log-dir")) { this->log_dir = options.get<std::string>("log-dir"); } if(options.exist("analytics-dir")) { this->analytics_dir = options.get<std::string>("analytics-dir"); } if(options.exist("analytics-db-ttl")) { this->analytics_db_ttl = options.get<uint32_t>("analytics-db-ttl"); } if(options.exist("analytics-minute-rate-limit")) { this->analytics_minute_rate_limit = options.get<uint32_t>("analytics-minute-rate-limit"); } if(options.exist("api-key")) { this->api_key = options.get<std::string>("api-key"); } // @deprecated if(options.exist("search-only-api-key")) { this->search_only_api_key = 
options.get<std::string>("search-only-api-key"); } if(options.exist("health-rusage-api-key")) { this->health_rusage_api_key = options.get<std::string>("health-rusage-api-key"); } if(options.exist("listen-address")) { this->api_address = options.get<std::string>("listen-address"); } if(options.exist("api-address")) { this->api_address = options.get<std::string>("api-address"); } if(options.exist("master")) { this->master = options.get<std::string>("master"); } if(options.exist("ssl-certificate")) { this->ssl_certificate = options.get<std::string>("ssl-certificate"); } if(options.exist("ssl-certificate-key")) { this->ssl_certificate_key = options.get<std::string>("ssl-certificate-key"); } if(options.exist("listen-port")) { this->api_port = options.get<uint32_t>("listen-port"); } if(options.exist("api-port")) { this->api_port = options.get<uint32_t>("api-port"); } if(options.exist("enable-cors")) { this->enable_cors = options.get<bool>("enable-cors"); } if(options.exist("cors-domains")) { std::string cors_domains_value = options.get<std::string>("cors-domains"); set_cors_domains(cors_domains_value); } if(options.exist("peering-address")) { this->peering_address = options.get<std::string>("peering-address"); } if(options.exist("peering-port")) { this->peering_port = options.get<uint32_t>("peering-port"); } if(options.exist("peering-subnet")) { this->peering_subnet = options.get<std::string>("peering-subnet"); } if(options.exist("nodes")) { this->nodes = options.get<std::string>("nodes"); } if(options.exist("max-memory-ratio")) { this->max_memory_ratio = options.get<float>("max-memory-ratio"); } if(options.exist("snapshot-interval-seconds")) { this->snapshot_interval_seconds = options.get<int>("snapshot-interval-seconds"); } if(options.exist("snapshot-max-byte-count-per-rpc")) { this->snapshot_max_byte_count_per_rpc = options.get<int>("snapshot-max-byte-count-per-rpc"); } if(options.exist("healthy-read-lag")) { this->healthy_read_lag = 
options.get<size_t>("healthy-read-lag"); } if(options.exist("healthy-write-lag")) { this->healthy_write_lag = options.get<size_t>("healthy-write-lag"); } if(options.exist("log-slow-requests-time-ms")) { this->log_slow_requests_time_ms = options.get<int>("log-slow-requests-time-ms"); } if(options.exist("log-slow-searches-time-ms")) { this->log_slow_searches_time_ms = options.get<int>("log-slow-searches-time-ms"); } if(options.exist("num-collections-parallel-load")) { this->num_collections_parallel_load = options.get<uint32_t>("num-collections-parallel-load"); } if(options.exist("num-documents-parallel-load")) { this->num_documents_parallel_load = options.get<uint32_t>("num-documents-parallel-load"); } if(options.exist("cache-num-entries")) { this->cache_num_entries = options.get<uint32_t>("cache-num-entries"); } if(options.exist("analytics-flush-interval")) { this->analytics_flush_interval = options.get<uint32_t>("analytics-flush-interval"); } if(options.exist("housekeeping-interval")) { this->housekeeping_interval = options.get<uint32_t>("housekeeping-interval"); } if(options.exist("db-compaction-interval")) { this->db_compaction_interval = options.get<uint32_t>("db-compaction-interval"); } if(options.exist("thread-pool-size")) { this->thread_pool_size = options.get<uint32_t>("thread-pool-size"); } if(options.exist("ssl-refresh-interval-seconds")) { this->ssl_refresh_interval_seconds = options.get<uint32_t>("ssl-refresh-interval-seconds"); } if(options.exist("enable-access-logging")) { this->enable_access_logging = options.get<bool>("enable-access-logging"); } if(options.exist("disk-used-max-percentage")) { this->disk_used_max_percentage = options.get<int>("disk-used-max-percentage"); } if(options.exist("memory-used-max-percentage")) { this->memory_used_max_percentage = options.get<int>("memory-used-max-percentage"); } if(options.exist("skip-writes")) { this->skip_writes = options.get<bool>("skip-writes"); } if(options.exist("reset-peers-on-error")) { 
this->reset_peers_on_error = options.get<bool>("reset-peers-on-error"); } if(options.exist("enable-search-analytics")) { this->enable_search_analytics = options.get<bool>("enable-search-analytics"); } if(options.exist("enable-lazy-filter")) { this->enable_lazy_filter = options.get<bool>("enable-lazy-filter"); } if(options.exist("enable-search-logging")) { this->enable_search_logging = options.get<bool>("enable-search-logging"); } if(options.exist("max-per-page")) { this->max_per_page = options.get<int>("max-per-page"); } if(options.exist("filter-by-max-ops")) { this->filter_by_max_ops = options.get<uint16_t>("filter-by-max-ops"); } }
24,768
C++
.cpp
515
41.227184
126
0.634258
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,815
stopwords_manager.cpp
typesense_typesense/src/stopwords_manager.cpp
#include "include/stopwords_manager.h" #include "include/tokenizer.h" void StopwordsManager::init(Store* _store) { store = _store; } spp::sparse_hash_map<std::string, stopword_struct_t> StopwordsManager::get_stopwords() const { std::shared_lock lock(mutex); return stopword_configs; } Option<bool> StopwordsManager::get_stopword(const std::string& stopword_name, stopword_struct_t& stopwords_struct) const { std::shared_lock lock(mutex); const auto& it = stopword_configs.find(stopword_name); if(it != stopword_configs.end()) { stopwords_struct = it->second; return Option<bool>(true); } return Option<bool>(404, "Stopword `" + stopword_name +"` not found."); } Option<bool> StopwordsManager::upsert_stopword(const std::string& stopword_name, const nlohmann::json& stopwords_json, bool write_to_store) { std::unique_lock lock(mutex); const char* STOPWORD_VALUES = "stopwords"; const char* STOPWORD_LOCALE = "locale"; std::string locale = ""; if(stopwords_json.count(STOPWORD_VALUES) == 0){ return Option<bool>(400, (std::string("Parameter `") + STOPWORD_VALUES + "` is required")); } if(stopwords_json[STOPWORD_VALUES].empty()) { return Option<bool>(400, (std::string("Parameter `") + STOPWORD_VALUES + "` is empty")); } if((!stopwords_json[STOPWORD_VALUES].is_array()) || (!stopwords_json[STOPWORD_VALUES][0].is_string())) { return Option<bool>(400, (std::string("Parameter `") + STOPWORD_VALUES + "` is required as string array value")); } if(stopwords_json.count(STOPWORD_LOCALE) != 0) { if (!stopwords_json[STOPWORD_LOCALE].is_string()) { return Option<bool>(400, (std::string("Parameter `") + STOPWORD_LOCALE + "` is required as string value")); } locale = stopwords_json[STOPWORD_LOCALE]; } if(write_to_store) { bool inserted = store->insert(get_stopword_key(stopword_name), stopwords_json.dump()); if (!inserted) { return Option<bool>(500, "Unable to insert into store."); } } std::vector<std::string> tokens; spp::sparse_hash_set<std::string> stopwords_set; const auto& stopwords = 
stopwords_json[STOPWORD_VALUES]; for (const auto &stopword: stopwords.items()) { const auto& val = stopword.value().get<std::string>(); Tokenizer(val, true, false, locale, {}, {}).tokenize(tokens); for(const auto& tok : tokens) { stopwords_set.emplace(tok); } tokens.clear(); } stopword_configs[stopword_name] = stopword_struct_t{stopword_name, stopwords_set, locale}; return Option<bool>(true); } std::string StopwordsManager::get_stopword_key(const std::string& stopword_name) { return std::string(STOPWORD_PREFIX) + "_" + stopword_name; } Option<bool> StopwordsManager::delete_stopword(const std::string& stopword_name) { std::unique_lock lock(mutex); if(stopword_configs.find(stopword_name) == stopword_configs.end()) { return Option<bool>(404, "Stopword `" + stopword_name + "` not found."); } stopword_configs.erase(stopword_name); bool removed = store->remove(get_stopword_key(stopword_name)); if(!removed) { return Option<bool>(500, "Unable to delete from store."); } return Option<bool>(true); } void StopwordsManager::dispose() { std::unique_lock lock(mutex); stopword_configs.clear(); } bool StopwordsManager::stopword_exists(const std::string &stopword) { std::shared_lock lock(mutex); return stopword_configs.find(stopword) != stopword_configs.end(); }
3,675
C++
.cpp
82
38.719512
122
0.659384
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,816
num_tree.cpp
typesense_typesense/src/num_tree.cpp
#include "num_tree.h" #include "parasort.h" #include "timsort.hpp" void num_tree_t::insert(int64_t value, uint32_t id, bool is_facet) { if (int64map.count(value) == 0) { int64map.emplace(value, SET_COMPACT_IDS(compact_id_list_t::create(1, {id}))); } else { auto ids = int64map[value]; if (!ids_t::contains(ids, id)) { ids_t::upsert(ids, id); int64map[value] = ids; } } } void num_tree_t::range_inclusive_search(int64_t start, int64_t end, uint32_t** ids, size_t& ids_len) { if(int64map.empty()) { return ; } auto it_start = int64map.lower_bound(start); // iter values will be >= start std::vector<uint32_t> consolidated_ids; while(it_start != int64map.end() && it_start->first <= end) { uint32_t* values = ids_t::uncompress(it_start->second); for(size_t i = 0; i < ids_t::num_ids(it_start->second); i++) { consolidated_ids.push_back(values[i]); } delete [] values; it_start++; } gfx::timsort(consolidated_ids.begin(), consolidated_ids.end()); uint32_t *out = nullptr; ids_len = ArrayUtils::or_scalar(&consolidated_ids[0], consolidated_ids.size(), *ids, ids_len, &out); delete [] *ids; *ids = out; } void num_tree_t::approx_range_inclusive_search_count(int64_t start, int64_t end, uint32_t& ids_len) { if (int64map.empty()) { return; } auto it_start = int64map.lower_bound(start); // iter values will be >= start while (it_start != int64map.end() && it_start->first <= end) { uint32_t val_ids = ids_t::num_ids(it_start->second); ids_len += val_ids; it_start++; } } bool num_tree_t::range_inclusive_contains(const int64_t& start, const int64_t& end, const uint32_t& id) const { if (int64map.empty()) { return false; } auto it_start = int64map.lower_bound(start); // iter values will be >= start while (it_start != int64map.end() && it_start->first <= end) { if (ids_t::contains(it_start->second, id)) { return true; } } return false; } void num_tree_t::range_inclusive_contains(const int64_t& start, const int64_t& end, const uint32_t& context_ids_length, uint32_t* const& context_ids, size_t& 
result_ids_len, uint32_t*& result_ids) const { if (int64map.empty()) { return; } std::vector<uint32_t> consolidated_ids; consolidated_ids.reserve(context_ids_length); for (uint32_t i = 0; i < context_ids_length; i++) { if (range_inclusive_contains(start, end, context_ids[i])) { consolidated_ids.push_back(context_ids[i]); } } uint32_t *out = nullptr; result_ids_len = ArrayUtils::or_scalar(&consolidated_ids[0], consolidated_ids.size(), result_ids, result_ids_len, &out); delete [] result_ids; result_ids = out; } size_t num_tree_t::get(int64_t value, std::vector<uint32_t>& geo_result_ids) { const auto& it = int64map.find(value); if(it == int64map.end()) { return 0; } uint32_t* ids = ids_t::uncompress(it->second); for(size_t i = 0; i < ids_t::num_ids(it->second); i++) { geo_result_ids.push_back(ids[i]); } delete [] ids; return ids_t::num_ids(it->second); } void num_tree_t::search(NUM_COMPARATOR comparator, int64_t value, uint32_t** ids, size_t& ids_len) { if(int64map.empty()) { return ; } if(comparator == EQUALS) { const auto& it = int64map.find(value); if(it != int64map.end()) { uint32_t *out = nullptr; uint32_t* val_ids = ids_t::uncompress(it->second); ids_len = ArrayUtils::or_scalar(val_ids, ids_t::num_ids(it->second), *ids, ids_len, &out); delete[] *ids; *ids = out; delete[] val_ids; } } else if(comparator == GREATER_THAN || comparator == GREATER_THAN_EQUALS) { // iter entries will be >= value, or end() if all entries are before value auto iter_ge_value = int64map.lower_bound(value); if(iter_ge_value == int64map.end()) { return ; } if(comparator == GREATER_THAN && iter_ge_value->first == value) { iter_ge_value++; } std::vector<uint32_t> consolidated_ids; while(iter_ge_value != int64map.end()) { ids_t::uncompress(iter_ge_value->second, consolidated_ids); iter_ge_value++; } gfx::timsort(consolidated_ids.begin(), consolidated_ids.end()); consolidated_ids.erase(unique(consolidated_ids.begin(), consolidated_ids.end()), consolidated_ids.end()); uint32_t *out = nullptr; 
ids_len = ArrayUtils::or_scalar(&consolidated_ids[0], consolidated_ids.size(), *ids, ids_len, &out); delete [] *ids; *ids = out; } else if(comparator == LESS_THAN || comparator == LESS_THAN_EQUALS) { // iter entries will be >= value, or end() if all entries are before value auto iter_ge_value = int64map.lower_bound(value); std::vector<uint32_t> consolidated_ids; auto it = int64map.begin(); while(it != iter_ge_value) { ids_t::uncompress(it->second, consolidated_ids); it++; } // for LESS_THAN_EQUALS, check if last iter entry is equal to value if(it != int64map.end() && comparator == LESS_THAN_EQUALS && it->first == value) { ids_t::uncompress(it->second, consolidated_ids); } gfx::timsort(consolidated_ids.begin(), consolidated_ids.end()); consolidated_ids.erase(unique(consolidated_ids.begin(), consolidated_ids.end()), consolidated_ids.end()); uint32_t *out = nullptr; ids_len = ArrayUtils::or_scalar(&consolidated_ids[0], consolidated_ids.size(), *ids, ids_len, &out); delete [] *ids; *ids = out; } } std::vector<void*> num_tree_t::search(const NUM_COMPARATOR& comparator, const int64_t& value, const int64_t& range_end_value) const { if (int64map.empty()) { return {}; } std::vector<void*> raw_id_lists; auto const& range_start_value = value; if (comparator == EQUALS || comparator == NOT_EQUALS) { const auto& it = int64map.find(value); if (it == int64map.end()) { return {}; } raw_id_lists.emplace_back(it->second); } else if (comparator == GREATER_THAN || comparator == GREATER_THAN_EQUALS) { // iter entries will be >= value, or end() if all entries are before value auto iter_ge_value = int64map.lower_bound(value); if (iter_ge_value == int64map.end()) { return {}; } if (comparator == GREATER_THAN && iter_ge_value->first == value) { iter_ge_value++; } while (iter_ge_value != int64map.end()) { raw_id_lists.emplace_back(iter_ge_value->second); iter_ge_value++; } } else if (comparator == LESS_THAN || comparator == LESS_THAN_EQUALS) { // iter entries will be >= value, or end() if 
all entries are before value auto iter_ge_value = int64map.lower_bound(value); auto it = int64map.begin(); while (it != iter_ge_value) { raw_id_lists.emplace_back(it->second); it++; } // for LESS_THAN_EQUALS, check if last iter entry is equal to value if (it != int64map.end() && comparator == LESS_THAN_EQUALS && it->first == value) { raw_id_lists.emplace_back(it->second); } } else if (comparator == RANGE_INCLUSIVE) { auto it_start = int64map.lower_bound(range_start_value); // iter values will be >= range_start_value while (it_start != int64map.end() && it_start->first <= range_end_value) { raw_id_lists.emplace_back(it_start->second); it_start++; } } return raw_id_lists; } uint32_t num_tree_t::approx_search_count(NUM_COMPARATOR comparator, int64_t value) { if (int64map.empty()) { return 0; } uint32_t ids_len = 0; if (comparator == EQUALS) { const auto& it = int64map.find(value); if (it != int64map.end()) { uint32_t val_ids = ids_t::num_ids(it->second); ids_len += val_ids; } } else if (comparator == GREATER_THAN || comparator == GREATER_THAN_EQUALS) { // iter entries will be >= value, or end() if all entries are before value auto iter_ge_value = int64map.lower_bound(value); if (iter_ge_value == int64map.end()) { return 0; } if (comparator == GREATER_THAN && iter_ge_value->first == value) { iter_ge_value++; } while (iter_ge_value != int64map.end()) { uint32_t val_ids = ids_t::num_ids(iter_ge_value->second); ids_len += val_ids; iter_ge_value++; } } else if (comparator == LESS_THAN || comparator == LESS_THAN_EQUALS) { // iter entries will be >= value, or end() if all entries are before value auto iter_ge_value = int64map.lower_bound(value); auto it = int64map.begin(); while (it != iter_ge_value) { uint32_t val_ids = ids_t::num_ids(it->second); ids_len += val_ids; it++; } // for LESS_THAN_EQUALS, check if last iter entry is equal to value if (it != int64map.end() && comparator == LESS_THAN_EQUALS && it->first == value) { uint32_t val_ids = ids_t::num_ids(it->second); 
ids_len += val_ids; } } return ids_len; } void num_tree_t::remove(uint64_t value, uint32_t id) { if(int64map.count(value) != 0) { void* arr = int64map[value]; ids_t::erase(arr, id); if(ids_t::num_ids(arr) == 0) { ids_t::destroy_list(arr); int64map.erase(value); } else { int64map[value] = arr; } } } void num_tree_t::contains(const NUM_COMPARATOR& comparator, const int64_t& value, const uint32_t& context_ids_length, uint32_t* const& context_ids, size_t& result_ids_len, uint32_t*& result_ids) const { if (int64map.empty()) { return; } std::vector<uint32_t> consolidated_ids; consolidated_ids.reserve(context_ids_length); for (uint32_t i = 0; i < context_ids_length; i++) { if (comparator == EQUALS) { if (contains(value, context_ids[i])) { consolidated_ids.push_back(context_ids[i]); } } else if (comparator == GREATER_THAN || comparator == GREATER_THAN_EQUALS) { // iter entries will be >= value, or end() if all entries are before value auto iter_ge_value = int64map.lower_bound(value); if (iter_ge_value == int64map.end()) { continue; } if (comparator == GREATER_THAN && iter_ge_value->first == value) { iter_ge_value++; } while (iter_ge_value != int64map.end()) { if (contains(iter_ge_value->first, context_ids[i])) { consolidated_ids.push_back(context_ids[i]); break; } iter_ge_value++; } } else if(comparator == LESS_THAN || comparator == LESS_THAN_EQUALS) { // iter entries will be >= value, or end() if all entries are before value auto iter_ge_value = int64map.lower_bound(value); auto it = int64map.begin(); while (it != iter_ge_value) { if (contains(it->first, context_ids[i])) { consolidated_ids.push_back(context_ids[i]); break; } it++; } // for LESS_THAN_EQUALS, check if last iter entry is equal to value if (it != int64map.end() && comparator == LESS_THAN_EQUALS && it->first == value) { if (contains(it->first, context_ids[i])) { consolidated_ids.push_back(context_ids[i]); break; } } } } gfx::timsort(consolidated_ids.begin(), consolidated_ids.end()); 
consolidated_ids.erase(unique(consolidated_ids.begin(), consolidated_ids.end()), consolidated_ids.end()); uint32_t *out = nullptr; result_ids_len = ArrayUtils::or_scalar(&consolidated_ids[0], consolidated_ids.size(), result_ids, result_ids_len, &out); delete[] result_ids; result_ids = out; } void num_tree_t::seq_ids_outside_top_k(size_t k, std::vector<uint32_t> &seq_ids) { size_t ids_skipped = 0; for (auto iter = int64map.rbegin(); iter != int64map.rend(); ++iter) { auto num_ids = ids_t::num_ids(iter->second); if(ids_skipped > k) { ids_t::uncompress(iter->second, seq_ids); } else if((ids_skipped + num_ids) > k) { // this element hits the limit, so we pick partial IDs to satisfy k std::vector<uint32_t> ids; ids_t::uncompress(iter->second, ids); for(size_t i = 0; i < ids.size(); i++) { auto seq_id = ids[i]; if(ids_skipped + i >= k) { seq_ids.push_back(seq_id); } } } ids_skipped += num_ids; } } std::pair<int64_t, int64_t> num_tree_t::get_min_max(const uint32_t* result_ids, size_t result_ids_len) { int64_t min, max; //first traverse from top to find min for(auto int64map_it = int64map.begin(); int64map_it != int64map.end(); ++int64map_it) { if(ids_t::intersect_count(int64map_it->second, result_ids, result_ids_len)) { min = int64map_it->first; break; } } //traverse from end to find max for(auto int64map_it = int64map.rbegin(); int64map_it != int64map.rend(); ++int64map_it) { if(ids_t::intersect_count(int64map_it->second, result_ids, result_ids_len)) { max = int64map_it->first; break; } } return std::make_pair(min, max); } size_t num_tree_t::size() { return int64map.size(); } num_tree_t::~num_tree_t() { for(auto& kv: int64map) { ids_t::destroy_list(kv.second); } } num_tree_t::iterator_t::iterator_t(num_tree_t* num_tree, NUM_COMPARATOR comparator, int64_t value) { if (num_tree == nullptr || num_tree->int64map.empty() || comparator != EQUALS) { is_valid = false; return; } const auto& it = num_tree->int64map.find(value); if (it == num_tree->int64map.end()) { is_valid = 
false; return; } auto obj = it->second; is_compact_id_list = IS_COMPACT_IDS(obj); if (is_compact_id_list) { id_list_array_len = ids_t::num_ids(obj); id_list_array = ids_t::uncompress(obj); approx_filter_ids_length = id_list_array_len; is_valid = id_list_array_len > index; if (is_valid) { seq_id = id_list_array[index]; } } else { id_list = (id_list_t*)(obj); id_list_iterator = id_list->new_iterator(); approx_filter_ids_length = id_list->num_ids(); is_valid = id_list_iterator.valid(); if (is_valid) { seq_id = id_list_iterator.id(); } } } int num_tree_t::iterator_t::is_id_valid(uint32_t id) { if (!is_valid) { return -1; } skip_to(id); return is_valid ? (seq_id == id) : -1; } void num_tree_t::iterator_t::next() { if (!is_valid) { return; } if (is_compact_id_list) { if (++index >= id_list_array_len) { is_valid = false; return; } seq_id = id_list_array[index]; } else { id_list_iterator.next(); if (!id_list_iterator.valid()) { is_valid = false; return; } seq_id = id_list_iterator.id(); } } void num_tree_t::iterator_t::skip_to(uint32_t id) { if (!is_valid) { return; } if (is_compact_id_list) { ArrayUtils::skip_index_to_id(index, id_list_array, id_list_array_len, id); if (index >= id_list_array_len) { is_valid = false; return; } seq_id = id_list_array[index]; } else { id_list_iterator.skip_to(id); if (!id_list_iterator.valid()) { is_valid = false; return; } seq_id = id_list_iterator.id(); } } void num_tree_t::iterator_t::reset() { if (is_compact_id_list) { index = 0; is_valid = index < id_list_array_len; if (is_valid) { seq_id = id_list_array[index]; } } else { id_list_iterator = id_list->new_iterator(); is_valid = id_list_iterator.valid(); if (is_valid) { seq_id = id_list_iterator.id(); } } } num_tree_t::iterator_t::~iterator_t() { if (is_compact_id_list) { delete[] id_list_array; } } num_tree_t::iterator_t& num_tree_t::iterator_t::operator=(num_tree_t::iterator_t&& obj) noexcept { if (&obj == this) { return *this; } if (is_compact_id_list) { delete[] id_list_array; } if 
(obj.is_compact_id_list) { is_compact_id_list = true; id_list_array_len = obj.id_list_array_len; id_list_array = obj.id_list_array; index = obj.index; obj.id_list_array = nullptr; } else { is_compact_id_list = false; id_list = obj.id_list; id_list_iterator = id_list->new_iterator(); id_list_iterator.skip_to(obj.id_list_iterator.id()); } approx_filter_ids_length = obj.approx_filter_ids_length; is_valid = obj.is_valid; seq_id = obj.seq_id; return *this; }
18,356
C++
.cpp
475
29.336842
113
0.549401
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,817
vector_query_ops.cpp
typesense_typesense/src/vector_query_ops.cpp
#include "vector_query_ops.h" #include "string_utils.h" #include "collection.h" Option<bool> VectorQueryOps::parse_vector_query_str(const std::string& vector_query_str, vector_query_t& vector_query, const bool is_wildcard_query, const Collection* coll, const bool allow_empty_query) { // FORMAT: // field_name:([0.34, 0.66, 0.12, 0.68], k: 10) size_t i = 0; while(i < vector_query_str.size()) { if(vector_query_str[i] == '(' || vector_query_str[i] == '[') { // If we hit a bracket before a colon, it's a missing colon error return Option<bool>(400, "Malformed vector query string: `:` is missing after the vector field name."); } if(vector_query_str[i] != ':') { vector_query.field_name += vector_query_str[i]; i++; } else { // field name is done i++; StringUtils::trim(vector_query.field_name); while(i < vector_query_str.size() && vector_query_str[i] != '(') { i++; } if(vector_query_str[i] != '(') { // missing "(" return Option<bool>(400, "Malformed vector query string."); } i++; while(i < vector_query_str.size() && vector_query_str[i] != '[') { i++; } if(vector_query_str[i] != '[') { // missing opening "[" return Option<bool>(400, "Malformed vector query string."); } i++; std::string values_str; while(i < vector_query_str.size() && vector_query_str[i] != ']') { values_str += vector_query_str[i]; i++; } if(vector_query_str[i] != ']') { // missing closing "]" return Option<bool>(400, "Malformed vector query string."); } i++; std::vector<std::string> svalues; StringUtils::split(values_str, svalues, ","); for(auto& svalue: svalues) { if(!StringUtils::is_float(svalue)) { return Option<bool>(400, "Malformed vector query string: one of the vector values is not a float."); } vector_query.values.push_back(std::stof(svalue)); } if(i == vector_query_str.size()-1) { // missing params if(vector_query.values.empty() && !allow_empty_query) { // when query values are missing, atleast the `id` parameter must be present return Option<bool>(400, "When a vector query value is empty, an `id` 
parameter must be present."); } return Option<bool>(true); } std::string param_str = vector_query_str.substr(i, (vector_query_str.size() - i)); std::vector<std::string> param_kvs; StringUtils::split(param_str, param_kvs, ","); for(size_t i = 0; i < param_kvs.size(); i++) { auto& param_kv_str = param_kvs[i]; if(param_kv_str.back() == ')') { param_kv_str.pop_back(); } std::vector<std::string> param_kv; StringUtils::split(param_kv_str, param_kv, ":"); if(param_kv.size() != 2) { return Option<bool>(400, "Malformed vector query string."); } if(i < param_kvs.size() - 1 && param_kv[1].front() == '[' && param_kv[1].back() != ']') { /* Currently, we parse vector query parameters by splitting them with commas (e.g., alpha:0.7, k:100). However, this approach has challenges when dealing with array parameters, where values are also separated by commas. For instance, with a vector query like embedding:([], qs:[x, y]), our logic may incorrectly parse it as qs:[x and y]) due to the comma separator. To address this issue, we have implemented a workaround. If a comma-separated vector query parameter has '[' as its first character and does not have ']' as its last character, this means that the parameter is not yet complete. In this case, we append the current parameter to the next parameter, and continue parsing the next parameter. 
*/ param_kvs[i+1] = param_kv_str + "," + param_kvs[i+1]; continue; } if(param_kv[0] == "id") { if(!vector_query.values.empty()) { // cannot pass both vector values and id return Option<bool>(400, "Malformed vector query string: cannot pass both vector query " "and `id` parameter."); } Option<uint32_t> id_op = coll->doc_id_to_seq_id(param_kv[1]); if(!id_op.ok()) { return Option<bool>(400, "Document id referenced in vector query is not found."); } nlohmann::json document; auto doc_op = coll->get_document_from_store(id_op.get(), document); if(!doc_op.ok()) { return Option<bool>(400, "Document id referenced in vector query is not found."); } if(!document.contains(vector_query.field_name) || !document[vector_query.field_name].is_array()) { return Option<bool>(400, "Document referenced in vector query does not contain a valid " "vector field."); } for(auto& fvalue: document[vector_query.field_name]) { if(!fvalue.is_number()) { return Option<bool>(400, "Document referenced in vector query does not contain a valid " "vector field."); } vector_query.values.push_back(fvalue.get<float>()); } vector_query.query_doc_given = true; vector_query.seq_id = id_op.get(); } if(param_kv[0] == "k") { if(!StringUtils::is_uint32_t(param_kv[1])) { return Option<bool>(400, "Malformed vector query string: `k` parameter must be an integer."); } vector_query.k = std::stoul(param_kv[1]); } if(param_kv[0] == "flat_search_cutoff") { if(!StringUtils::is_uint32_t(param_kv[1])) { return Option<bool>(400, "Malformed vector query string: " "`flat_search_cutoff` parameter must be an integer."); } vector_query.flat_search_cutoff = std::stoi(param_kv[1]); } if(param_kv[0] == "distance_threshold") { auto search_schema = const_cast<Collection*>(coll)->get_schema(); auto vector_field_it = search_schema.find(vector_query.field_name); if(vector_field_it == search_schema.end()) { return Option<bool>(400, "Malformed vector query string: could not find a field named " "`" + vector_query.field_name + "`."); } 
if(!StringUtils::is_float(param_kv[1])) { return Option<bool>(400, "Malformed vector query string: " "`distance_threshold` parameter must be a float."); } auto distance_threshold = std::stof(param_kv[1]); if(vector_field_it->vec_dist == cosine && (distance_threshold < 0.0 || distance_threshold > 2.0)) { return Option<bool>(400, "Malformed vector query string: " "`distance_threshold` parameter must be a float between 0.0-2.0."); } vector_query.distance_threshold = distance_threshold; } if(param_kv[0] == "alpha") { if(!StringUtils::is_float(param_kv[1]) || std::stof(param_kv[1]) < 0.0 || std::stof(param_kv[1]) > 1.0) { return Option<bool>(400, "Malformed vector query string: " "`alpha` parameter must be a float between 0.0-1.0."); } vector_query.alpha = std::stof(param_kv[1]); } if(param_kv[0] == "ef") { if(!StringUtils::is_uint32_t(param_kv[1]) || std::stoul(param_kv[1]) == 0) { return Option<bool>(400, "Malformed vector query string: `ef` parameter must be a positive integer."); } vector_query.ef = std::stoul(param_kv[1]); } if(param_kv[0] == "queries") { if(param_kv[1].front() != '[' || param_kv[1].back() != ']') { return Option<bool>(400, "Malformed vector query string: " "`queries` parameter must be a list of strings."); } param_kv[1].erase(0, 1); param_kv[1].pop_back(); std::vector<std::string> qs; StringUtils::split(param_kv[1], qs, ","); for(auto& q: qs) { StringUtils::trim(q); vector_query.queries.push_back(q); } } if(param_kv[0] == "query_weights") { if(param_kv[1].front() != '[' || param_kv[1].back() != ']') { return Option<bool>(400, "Malformed vector query string: " "`query_weights` parameter must be a list of floats."); } param_kv[1].erase(0, 1); param_kv[1].pop_back(); std::vector<std::string> ws; StringUtils::split(param_kv[1], ws, ","); for(auto& w: ws) { StringUtils::trim(w); if(!StringUtils::is_float(w)) { return Option<bool>(400, "Malformed vector query string: " "`query_weights` parameter must be a list of floats."); } 
vector_query.query_weights.push_back(std::stof(w)); } } } if(vector_query.queries.size() != vector_query.query_weights.size() && !vector_query.query_weights.empty()) { return Option<bool>(400, "Malformed vector query string: " "`queries` and `query_weights` must be of the same length."); } if(!vector_query.query_weights.empty()) { float sum = 0.0; for(auto& w: vector_query.query_weights) { sum += w; } if(sum != 1.0) { return Option<bool>(400, "Malformed vector query string: " "`query_weights` must sum to 1.0."); } } return Option<bool>(true); } } // We hit the end of the string without finding a colon return Option<bool>(400, "Malformed vector query string: `:` is missing."); }
12,218
C++
.cpp
216
35.986111
191
0.455696
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,818
main.cpp
typesense_typesense/src/main/main.cpp
#include <stdlib.h> #include <iostream> #include <fstream> #include <vector> #include <numeric> #include <chrono> #include <sys/resource.h> #include "collection.h" #include "collection_manager.h" using namespace std; int main(int argc, char* argv[]) { const std::string state_dir_path = "/tmp/typesense-data"; system("rm -rf /tmp/typesense-data && mkdir -p /tmp/typesense-data"); Store *store = new Store(state_dir_path); CollectionManager & collectionManager = CollectionManager::get_instance(); std::atomic<bool> exit; collectionManager.init(store, 4, "abcd", exit); collectionManager.load(100, 10000); std::vector<field> fields_to_index = { field("lang", field_types::STRING, true), field("description", field_types::STRING, false), field("topics", field_types::STRING_ARRAY, true), field("stars", field_types::INT32, false), field("repo_name", field_types::STRING, false), field("org", field_types::STRING, true) }; Collection *collection = collectionManager.get_collection("github_top1k").get(); if(collection == nullptr) { collection = collectionManager.create_collection("github_top1k", 4, fields_to_index, "stars").get(); } int j = 0; while(j < 1000) { j++; std::ifstream infile(argv[1]); std::string json_line; std::cout << "BEGINNING Iteration: " << j << std::endl; auto begin = std::chrono::high_resolution_clock::now(); int doc_id = 0; while (std::getline(infile, json_line)) { nlohmann::json document = nlohmann::json::parse(json_line); //document["id"] = std::to_string(doc_id); document["id"] = document["org"].get<std::string>() + ":" + document["repo_name"].get<std::string>(); collection->add(document.dump()); doc_id++; } infile.close(); long long int timeMillis = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::high_resolution_clock::now() - begin).count(); std::cout << "Time taken for insertion: " << timeMillis << "ms" << std::endl; begin = std::chrono::high_resolution_clock::now(); std::ifstream infile2(argv[1]); doc_id = 0; while (std::getline(infile2, 
json_line)) { nlohmann::json document = nlohmann::json::parse(json_line); //document["id"] = std::to_string(doc_id); document["id"] = document["org"].get<std::string>() + ":" + document["repo_name"].get<std::string>(); collection->remove(document["id"]); doc_id++; } infile2.close(); timeMillis = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::high_resolution_clock::now() - begin).count(); struct rusage r_usage; getrusage(RUSAGE_SELF,&r_usage); std::cout << "Memory usage: " << r_usage.ru_maxrss << std::endl; std::cout << "Time taken for deletion: " << timeMillis << "ms" << std::endl; } collectionManager.dispose(); delete store; return 0; }
3,165
C++
.cpp
71
36.549296
129
0.601626
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,819
benchmark.cpp
typesense_typesense/src/main/benchmark.cpp
#include <stdlib.h> #include <iostream> #include <fstream> #include <vector> #include <numeric> #include <chrono> #include <art.h> #include <unordered_map> #include <queue> #include <ctime> #include "collection.h" #include "string_utils.h" #include "collection_manager.h" using namespace std; std::string get_query(StringUtils & string_utils, std::string & text) { std::vector<std::string> tokens; std::vector<std::string> normalized_tokens; StringUtils::split(text, tokens, " "); for(uint32_t i=0; i<tokens.size(); i++) { auto token = tokens[i]; //string_utils.unicode_normalize(token); normalized_tokens.push_back(token); } size_t rand_len = 0 + (rand() % static_cast<int>(2 - 0 + 1)); size_t rand_index = 0 + (rand() % static_cast<int>(tokens.size()-1 - 0 + 1)); size_t end_index = std::min(rand_index+rand_len, tokens.size()-1); std::stringstream ss; for(auto i = rand_index; i <= end_index; i++) { if(i != rand_index) { ss << " "; } ss << normalized_tokens[i]; } return ss.str(); } void benchmark_hn_titles(char* file_path) { std::vector<field> fields_to_index = { field("title", field_types::STRING, false), field("points", field_types::INT32, false) }; Store *store = new Store("/tmp/typesense-data"); CollectionManager & collectionManager = CollectionManager::get_instance(); std::atomic<bool> quit; collectionManager.init(store, 1, "abcd", quit); collectionManager.load(100, 100); Collection *collection = collectionManager.get_collection("hnstories_direct").get(); if(collection == nullptr) { collection = collectionManager.create_collection("hnstories_direct", 4, fields_to_index, "points").get(); } std::ifstream infile(file_path); std::string json_line; StringUtils string_utils; std::vector<std::string> queries; size_t counter = 0; auto begin0 = std::chrono::high_resolution_clock::now(); while (std::getline(infile, json_line)) { counter++; collection->add(json_line); if(counter % 100 == 0) { nlohmann::json obj = nlohmann::json::parse(json_line); std::string title = obj["title"]; 
std::string query = get_query(string_utils, title); queries.push_back(query); } } infile.close(); long long int timeMillis0 = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::high_resolution_clock::now() - begin0).count(); std::cout << "FINISHED INDEXING!" << flush << std::endl; std::cout << "Time taken: " << timeMillis0 << "ms" << std::endl; std::vector<std::string> search_fields = {"title"}; uint64_t results_total = 0; // to prevent no-op optimization! auto begin = std::chrono::high_resolution_clock::now(); for(size_t i = 0; i < queries.size(); i++) { auto results_op = collection->search(queries[i], search_fields, "", { }, {sort_by("points", "DESC")}, {2}, 10, 1, MAX_SCORE, {true}); if(results_op.ok() != true) { exit(2); } auto results = results_op.get(); results_total += results["hits"].size(); } long long int timeMillis = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::high_resolution_clock::now() - begin).count(); std::cout << "Number of queries: " << queries.size() << std::endl; std::cout << "Time taken: " << timeMillis << "ms" << std::endl; std::cout << "Results total: " << results_total << std::endl; } void benchmark_reactjs_pages(char* file_path) { std::vector<field> fields_to_index = { field("url", field_types::STRING, false), field("h1", field_types::STRING, false), field("h2", field_types::STRING_ARRAY, false), field("h3", field_types::STRING_ARRAY, false), field("h4", field_types::STRING_ARRAY, false), field("h5", field_types::STRING_ARRAY, false), field("h6", field_types::STRING_ARRAY, false), field("p", field_types::STRING_ARRAY, false), field("dummy_sorting_field", field_types::INT32, false) }; Store *store = new Store("/tmp/typesense-data"); CollectionManager & collectionManager = CollectionManager::get_instance(); std::atomic<bool> quit; collectionManager.init(store, 4, "abcd", quit); collectionManager.load(100, 100); Collection* collection = collectionManager.get_collection("reactjs_pages").get(); 
if(collection == nullptr) { collection = collectionManager.create_collection("reactjs_pages", 4, fields_to_index, "dummy_sorting_field").get(); } std::ifstream infile(file_path); std::string json_line; StringUtils string_utils; std::vector<std::string> queries; size_t counter = 0; while (std::getline(infile, json_line)) { counter++; collection->add(json_line); if(counter % 1 == 0) { nlohmann::json obj = nlohmann::json::parse(json_line); std::string title = obj["p"][0]; std::string query = get_query(string_utils, title); queries.push_back(query); } } infile.close(); std::cout << "FINISHED INDEXING!" << flush << std::endl; std::vector<std::string> search_fields = {"h1", "h2", "h3", "h4", "h5", "h6", "p"}; uint64_t results_total = 0; // to prevent no-op optimization! auto begin = std::chrono::high_resolution_clock::now(); for(size_t i = 0; i < queries.size(); i++) { auto results_op = collection->search(queries[i], search_fields, "", { }, {sort_by("dummy_sorting_field", "DESC")}, {2}, 10, 1, MAX_SCORE, {true}, 10, spp::sparse_hash_set<std::string>(), {"p"}); if(results_op.ok() != true) { exit(2); } auto results = results_op.get(); results_total += results["hits"].size(); } long long int timeMillis = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::high_resolution_clock::now() - begin).count(); std::cout << "Number of queries: " << queries.size() << std::endl; std::cout << "Time taken: " << timeMillis << "ms" << std::endl; std::cout << "Results total: " << results_total << std::endl; } void generate_word_freq() { std::ifstream infile("/tmp/unigram_freq.jsonl"); std::ofstream outfile("/tmp/eng_words.jsonl", std::ios_base::app); std::string json_line; while (std::getline(infile, json_line)) { try { nlohmann::json obj = nlohmann::json::parse(json_line); obj["count"] = uint64_t((double(obj["count"].get<uint64_t>()) / 23135851162) * 1000000000); std::string json_str = obj.dump(); outfile << json_str << std::endl; } catch(...) 
{ LOG(ERROR) << "Failed parsing: " << json_line; } } infile.close(); outfile.close(); } int main(int argc, char* argv[]) { srand(time(NULL)); // system("rm -rf /tmp/typesense-data && mkdir -p /tmp/typesense-data"); // benchmark_hn_titles(argv[1]); // benchmark_reactjs_pages(argv[1]); generate_word_freq(); return 0; }
7,224
C++
.cpp
163
37.539877
146
0.608287
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,820
typesense_server.cpp
typesense_typesense/src/main/typesense_server.cpp
#include <housekeeper.h> #include "typesense_server_utils.h" #include "core_api.h" #include "tsconfig.h" #include "stackprinter.h" #include "backward.hpp" #include "butil/at_exit.h" #ifndef ASAN_BUILD extern "C" { #include "jemalloc.h" } #ifdef __APPLE__ extern "C" { extern void je_zone_register(); } #endif #endif void master_server_routes() { // collection operations // NOTE: placing this first to score an immediate hit on O(N) route search server->get("/collections/:collection/documents/search", get_search); server->post("/multi_search", post_multi_search); // document management // NOTE:`/documents/:id` end-points must be placed last in the list server->post("/collections/:collection/documents", post_add_document); server->del("/collections/:collection/documents", del_remove_documents, false, true); server->post("/collections/:collection/documents/import", post_import_documents, true, true); server->get("/collections/:collection/documents/export", get_export_documents, false, true); server->get("/collections/:collection/documents/:id", get_fetch_document); server->patch("/collections/:collection/documents/:id", patch_update_document); server->patch("/collections/:collection/documents", patch_update_documents); server->del("/collections/:collection/documents/:id", del_remove_document); server->get("/collections/:collection/overrides", get_overrides); server->get("/collections/:collection/overrides/:id", get_override); server->put("/collections/:collection/overrides/:id", put_override); server->del("/collections/:collection/overrides/:id", del_override); server->get("/collections/:collection/synonyms", get_synonyms); server->get("/collections/:collection/synonyms/:id", get_synonym); server->put("/collections/:collection/synonyms/:id", put_synonym); server->del("/collections/:collection/synonyms/:id", del_synonym); // collection management server->post("/collections", post_create_collection); server->patch("/collections/:collection", patch_update_collection); 
server->get("/collections", get_collections); server->del("/collections/:collection", del_drop_collection); server->get("/collections/:collection", get_collection_summary); server->get("/aliases", get_aliases); server->get("/aliases/:alias", get_alias); server->put("/aliases/:alias", put_upsert_alias); server->del("/aliases/:alias", del_alias); server->get("/keys", get_keys); server->get("/keys/:id", get_key); server->post("/keys", post_create_key); server->del("/keys/:id", del_key); server->get("/presets", get_presets); server->get("/presets/:name", get_preset); server->put("/presets/:name", put_upsert_preset); server->del("/presets/:name", del_preset); server->get("/stopwords", get_stopwords); server->get("/stopwords/:name", get_stopword); server->put("/stopwords/:name", put_upsert_stopword); server->del("/stopwords/:name", del_stopword); // analytics server->get("/analytics/rules", get_analytics_rules); server->get("/analytics/rules/:name", get_analytics_rule); server->post("/analytics/rules", post_create_analytics_rules); server->put("/analytics/rules/:name", put_upsert_analytics_rules); server->del("/analytics/rules/:name", del_analytics_rules); server->post("/analytics/events", post_create_event); server->post("/analytics/aggregate_events", post_write_analytics_to_db); // meta server->get("/metrics.json", get_metrics_json); server->get("/stats.json", get_stats_json); server->get("/debug", get_debug); server->get("/health", get_health); server->get("/health_with_rusage", get_health_with_resource_usage); server->post("/health", post_health); server->get("/status", get_status); server->post("/operations/snapshot", post_snapshot, false, true); server->post("/operations/vote", post_vote, false, false); server->post("/operations/cache/clear", post_clear_cache, false, false); server->post("/operations/db/compact", post_compact_db, false, false); server->post("/operations/reset_peers", post_reset_peers, false, false); server->post("/conversations/models", 
post_conversation_model); server->get("/conversations/models", get_conversation_models); server->get("/conversations/models/:id", get_conversation_model); server->put("/conversations/models/:id", put_conversation_model); server->del("/conversations/models/:id", del_conversation_model); server->post("/personalization/models", post_personalization_model); server->get("/personalization/models", get_personalization_models); server->get("/personalization/models/:id", get_personalization_model); server->del("/personalization/models/:id", del_personalization_model); server->put("/personalization/models/:id", put_personalization_model); server->get("/limits", get_rate_limits); server->get("/limits/active", get_active_throttles); server->get("/limits/exceeds", get_limit_exceed_counts); server->get("/limits/:id", get_rate_limit); server->post("/limits", post_rate_limit); server->put("/limits/:id", put_rate_limit); server->del("/limits/:id", del_rate_limit); server->del("/limits/active/:id", del_throttle); server->del("/limits/exceeds/:id", del_exceed); server->post("/config", post_config, false, false); // for proxying remote embedders server->post("/proxy", post_proxy); } void (*backward::SignalHandling::_callback)(int sig, backward::StackTrace&) = nullptr; void crash_callback(int sig, backward::StackTrace& st) { backward::TraceResolver tr; tr.load_stacktrace(st); for (size_t i = 0; i < st.size(); ++i) { backward::ResolvedTrace trace = tr.resolve(st[i]); if(trace.object_function.find("BatchedIndexer") != std::string::npos || trace.object_function.find("batch_memory_index") != std::string::npos) { server->persist_applying_index(); break; } } HouseKeeper::get_instance().log_running_queries(); LOG(ERROR) << "Typesense " << TYPESENSE_VERSION << " is terminating abruptly."; } int main(int argc, char **argv) { #ifndef ASAN_BUILD #ifdef __APPLE__ // On OS X, je_zone_register registers jemalloc with the system allocator. 
// We have to force the presence of these symbols on macOS by explicitly calling this method. // See these issues: // - https://github.com/jemalloc/jemalloc/issues/708 // - https://github.com/ClickHouse/ClickHouse/pull/11897 je_zone_register(); #endif #endif butil::AtExitManager exit_manager; Config& config = Config::get_instance(); cmdline::parser options; init_cmdline_options(options, argc, argv); options.parse(argc, argv); // Command line args override env vars config.load_config_env(); config.load_config_file(options); config.load_config_cmd_args(options); Option<bool> config_validitation = config.is_valid(); if(!config_validitation.ok()) { std::cerr << "Typesense " << TYPESENSE_VERSION << std::endl; std::cerr << "Invalid configuration: " << config_validitation.error() << std::endl; std::cerr << "Command line " << options.usage() << std::endl; std::cerr << "You can also pass these arguments as environment variables such as " << "TYPESENSE_DATA_DIR, TYPESENSE_API_KEY, etc." << std::endl; exit(1); } int ret_code = init_root_logger(config, TYPESENSE_VERSION); if(ret_code != 0) { return ret_code; } #ifdef __APPLE__ #ifdef USE_BACKWARD backward::SignalHandling sh; sh._callback = crash_callback; #else signal(SIGABRT, StackPrinter::bt_sighandler); signal(SIGFPE, StackPrinter::bt_sighandler); signal(SIGILL, StackPrinter::bt_sighandler); signal(SIGSEGV, StackPrinter::bt_sighandler); #endif #elif __linux__ backward::SignalHandling sh; sh._callback = crash_callback; #endif // we can install new signal handlers only after overriding above signal(SIGINT, catch_interrupt); signal(SIGTERM, catch_interrupt); init_api(config.get_cache_num_entries()); return run_server(config, TYPESENSE_VERSION, &master_server_routes); }
8,372
C++
.cpp
173
43.260116
97
0.688273
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,821
port_config.h
typesense_typesense/bazel/leveldb/port_config.h
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // // Copyright 2017 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_PORT_PORT_CONFIG_H_ #define STORAGE_LEVELDB_PORT_PORT_CONFIG_H_ // Define to 1 if you have a definition for fdatasync() in <unistd.h>. #define HAVE_FUNC_FDATASYNC 1 // Define to 1 if you have Google CRC32C. #define HAVE_CRC32C 1 // Define to 1 if you have Google Snappy. #define HAVE_SNAPPY 1 // Define to 1 if your processor stores words with the most significant byte // first (like Motorola and SPARC, unlike Intel and VAX). #define LEVELDB_IS_BIG_ENDIAN 0 #endif // STORAGE_LEVELDB_PORT_PORT_CONFIG_H_
1,553
C++
.h
32
47.34375
77
0.772277
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,822
port.h
typesense_typesense/bazel/leveldb/port.h
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_PORT_PORT_H_ #define STORAGE_LEVELDB_PORT_PORT_H_ #include <string.h> #define LEVELDB_HAS_PORT_CONFIG_H 1 // Include the appropriate platform specific file below. If you are // porting to a new platform, see "port_example.h" for documentation // of what the new port_<platform>.h file must provide. #include "port/port_stdcxx.h" #endif // STORAGE_LEVELDB_PORT_PORT_H_
1,419
C++
.h
29
47.758621
77
0.770397
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,823
topster.h
typesense_typesense/include/topster.h
#pragma once #include <cstdint> #include <climits> #include <cstdio> #include <algorithm> #include <unordered_map> #include <field.h> #include "filter_result_iterator.h" struct KV { int8_t match_score_index{}; uint16_t query_index{}; uint16_t array_index{}; uint64_t key{}; uint64_t distinct_key{}; int64_t scores[3]{}; // match score + 2 custom attributes // only to be used in hybrid search float vector_distance = -1.0f; int64_t text_match_score = 0; // to be used only in final aggregation uint64_t* query_indices = nullptr; std::map<std::string, reference_filter_result_t> reference_filter_results; KV(uint16_t queryIndex, uint64_t key, uint64_t distinct_key, int8_t match_score_index, const int64_t *scores, std::map<std::string, reference_filter_result_t> reference_filter_results = {}): match_score_index(match_score_index), query_index(queryIndex), array_index(0), key(key), distinct_key(distinct_key), reference_filter_results(std::move(reference_filter_results)) { this->scores[0] = scores[0]; this->scores[1] = scores[1]; this->scores[2] = scores[2]; if(match_score_index >= 0){ this->text_match_score = scores[match_score_index]; } } KV() = default; KV(KV& kv) = default; KV(KV&& kv) noexcept : match_score_index(kv.match_score_index), query_index(kv.query_index), array_index(kv.array_index), key(kv.key), distinct_key(kv.distinct_key) { scores[0] = kv.scores[0]; scores[1] = kv.scores[1]; scores[2] = kv.scores[2]; query_indices = kv.query_indices; kv.query_indices = nullptr; vector_distance = kv.vector_distance; text_match_score = kv.text_match_score; reference_filter_results = std::move(kv.reference_filter_results); } KV& operator=(KV&& kv) noexcept { if (this != &kv) { match_score_index = kv.match_score_index; query_index = kv.query_index; array_index = kv.array_index; key = kv.key; distinct_key = kv.distinct_key; scores[0] = kv.scores[0]; scores[1] = kv.scores[1]; scores[2] = kv.scores[2]; delete[] query_indices; query_indices = kv.query_indices; kv.query_indices = 
nullptr; vector_distance = kv.vector_distance; text_match_score = kv.text_match_score; reference_filter_results = std::move(kv.reference_filter_results); } return *this; } KV& operator=(KV& kv) noexcept { if (this != &kv) { match_score_index = kv.match_score_index; query_index = kv.query_index; array_index = kv.array_index; key = kv.key; distinct_key = kv.distinct_key; scores[0] = kv.scores[0]; scores[1] = kv.scores[1]; scores[2] = kv.scores[2]; delete[] query_indices; query_indices = kv.query_indices; kv.query_indices = nullptr; vector_distance = kv.vector_distance; text_match_score = kv.text_match_score; reference_filter_results.clear(); for (const auto& item: kv.reference_filter_results) { reference_filter_results[item.first] = item.second; } } return *this; } ~KV() { delete [] query_indices; query_indices = nullptr; } }; /* * Remembers the max-K elements seen so far using a min-heap */ struct Topster { const uint32_t MAX_SIZE; uint32_t size; KV *data; KV** kvs; std::unordered_map<uint64_t, KV*> kv_map; spp::sparse_hash_set<uint64_t> group_doc_seq_ids; spp::sparse_hash_map<uint64_t, Topster*> group_kv_map; size_t distinct; explicit Topster(size_t capacity): Topster(capacity, 0) { } explicit Topster(size_t capacity, size_t distinct): MAX_SIZE(capacity), size(0), distinct(distinct) { // we allocate data first to get a memory block whose indices are then assigned to `kvs` // we use separate **kvs for easier pointer swaps data = new KV[capacity]; kvs = new KV*[capacity]; for(size_t i=0; i<capacity; i++) { data[i].match_score_index = 0; data[i].query_index = 0; data[i].array_index = i; data[i].key = 0; data[i].distinct_key = 0; kvs[i] = &data[i]; } } ~Topster() { delete[] data; delete[] kvs; for(auto& kv: group_kv_map) { delete kv.second; } data = nullptr; kvs = nullptr; group_kv_map.clear(); } static inline void swapMe(KV** a, KV** b) { KV *temp = *a; *a = *b; *b = temp; uint16_t a_index = (*a)->array_index; (*a)->array_index = (*b)->array_index; (*b)->array_index = 
a_index; } int add(KV* kv) { /*LOG(INFO) << "kv_map size: " << kv_map.size() << " -- kvs[0]: " << kvs[0]->scores[kvs[0]->match_score_index]; for(auto& mkv: kv_map) { LOG(INFO) << "kv key: " << mkv.first << " => " << mkv.second->scores[mkv.second->match_score_index]; }*/ int ret = 1; bool less_than_min_heap = (size >= MAX_SIZE) && is_smaller(kv, kvs[0]); size_t heap_op_index = 0; if(!distinct && less_than_min_heap) { // for non-distinct, if incoming value is smaller than min-heap ignore return 0; } bool SIFT_DOWN = true; if(distinct) { const auto& doc_seq_id_exists = (group_doc_seq_ids.find(kv->key) != group_doc_seq_ids.end()); if(doc_seq_id_exists) { ret = 2; } group_doc_seq_ids.emplace(kv->key); // Grouping cannot be a streaming operation, so aggregate the KVs associated with every group. auto kvs_it = group_kv_map.find(kv->distinct_key); if(kvs_it != group_kv_map.end()) { kvs_it->second->add(kv); } else { Topster* g_topster = new Topster(distinct, 0); g_topster->add(kv); group_kv_map.insert({kv->distinct_key, g_topster}); } return ret; } else { // not distinct //LOG(INFO) << "Searching for key: " << kv->key; const auto& found_it = kv_map.find(kv->key); bool is_duplicate_key = (found_it != kv_map.end()); /* is_duplicate_key: SIFT_DOWN regardless of `size`. Else: Do SIFT_UP if size < max_size Else SIFT_DOWN */ if(is_duplicate_key) { // Need to check if kv is greater than existing duplicate kv. KV* existing_kv = found_it->second; //LOG(INFO) << "existing_kv: " << existing_kv->key << " -> " << existing_kv->match_score; bool smaller_than_existing = is_smaller(kv, existing_kv); if(smaller_than_existing) { return 0; } SIFT_DOWN = true; // replace existing kv and sift down heap_op_index = existing_kv->array_index; kv_map.erase(kvs[heap_op_index]->key); } else { // not duplicate if(size < MAX_SIZE) { // we just copy to end of array SIFT_DOWN = false; heap_op_index = size; size++; } else { // kv is guaranteed to be > min heap. 
// we have to replace min heap element since array is full SIFT_DOWN = true; heap_op_index = 0; kv_map.erase(kvs[heap_op_index]->key); } } // kv will be copied into the pointer at heap_op_index kv_map.emplace(kv->key, kvs[heap_op_index]); } // we have to replace the existing element in the heap and sift down kv->array_index = heap_op_index; *kvs[heap_op_index] = *kv; // sift up/down to maintain heap property if(SIFT_DOWN) { while ((2 * heap_op_index + 1) < size) { uint32_t next = (2 * heap_op_index + 1); // left child if (next+1 < size && is_greater(kvs[next], kvs[next + 1])) { // for min heap we compare with the minimum of children next++; // right child (2n + 2) } if (is_greater(kvs[heap_op_index], kvs[next])) { swapMe(&kvs[heap_op_index], &kvs[next]); } else { break; } heap_op_index = next; } } else { // SIFT UP while(heap_op_index > 0) { uint32_t parent = (heap_op_index - 1) / 2; if (is_greater(kvs[parent], kvs[heap_op_index])) { swapMe(&kvs[heap_op_index], &kvs[parent]); heap_op_index = parent; } else { break; } } } return ret; } static bool is_greater(const struct KV* i, const struct KV* j) { return std::tie(i->scores[0], i->scores[1], i->scores[2], i->key) > std::tie(j->scores[0], j->scores[1], j->scores[2], j->key); } static bool is_smaller(const struct KV* i, const struct KV* j) { return std::tie(i->scores[0], i->scores[1], i->scores[2], i->key) < std::tie(j->scores[0], j->scores[1], j->scores[2], j->key); } static bool is_greater_kv_group(const std::vector<KV*>& i, const std::vector<KV*>& j) { return std::tie(i[0]->scores[0], i[0]->scores[1], i[0]->scores[2], i[0]->key) > std::tie(j[0]->scores[0], j[0]->scores[1], j[0]->scores[2], j[0]->key); } // topster must be sorted before iterated upon to remove dead array entries void sort() { if(!distinct) { std::stable_sort(kvs, kvs + size, is_greater); } } void clear(){ size = 0; } uint64_t getKeyAt(uint32_t index) { return kvs[index]->key; } uint64_t getDistinctKeyAt(uint32_t index) { return 
kvs[index]->distinct_key; } KV* getKV(uint32_t index) { return kvs[index]; } };
10,752
C++
.h
270
28.681481
119
0.524104
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,824
array_utils.h
typesense_typesense/include/array_utils.h
#pragma once #include <cstddef> #include <stdint.h> #include <array> /* Different intersection routines adapted from: * https://github.com/lemire/SIMDCompressionAndIntersection/blob/master/src/intersection.cpp */ class ArrayUtils { public: // Fast scalar scheme designed by N. Kurz. Returns the size of out (intersected set) static size_t and_scalar(const uint32_t *A, const size_t lenA, const uint32_t *B, const size_t lenB, uint32_t **out); static size_t or_scalar(const uint32_t *A, const size_t lenA, const uint32_t *B, const size_t lenB, uint32_t **out); static size_t exclude_scalar(const uint32_t *src, const size_t lenSrc, const uint32_t *filter, const size_t lenFilter, uint32_t **out); /// Performs binary search to find the index of id. If id is not found, curr_index is set to the index of next bigger /// number than id in the array. /// \return Whether or not id was found in array. static bool skip_index_to_id(uint32_t& curr_index, uint32_t const* const array, const uint32_t& array_len, const uint32_t& id); };
1,111
C++
.h
20
50.4
120
0.701012
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,825
dr_wav.h
typesense_typesense/include/dr_wav.h
/* WAV audio loader and writer. Choice of public domain or MIT-0. See license statements at the end of this file. dr_wav - v0.13.14 - 2023-12-02 David Reid - mackron@gmail.com GitHub: https://github.com/mackron/dr_libs */ /* Introduction ============ This is a single file library. To use it, do something like the following in one .c file. ```c #define DR_WAV_IMPLEMENTATION #include "dr_wav.h" ``` You can then #include this file in other parts of the program as you would with any other header file. Do something like the following to read audio data: ```c drwav wav; if (!drwav_init_file(&wav, "my_song.wav", NULL)) { // Error opening WAV file. } drwav_int32* pDecodedInterleavedPCMFrames = malloc(wav.totalPCMFrameCount * wav.channels * sizeof(drwav_int32)); size_t numberOfSamplesActuallyDecoded = drwav_read_pcm_frames_s32(&wav, wav.totalPCMFrameCount, pDecodedInterleavedPCMFrames); ... drwav_uninit(&wav); ``` If you just want to quickly open and read the audio data in a single operation you can do something like this: ```c unsigned int channels; unsigned int sampleRate; drwav_uint64 totalPCMFrameCount; float* pSampleData = drwav_open_file_and_read_pcm_frames_f32("my_song.wav", &channels, &sampleRate, &totalPCMFrameCount, NULL); if (pSampleData == NULL) { // Error opening and reading WAV file. } ... drwav_free(pSampleData, NULL); ``` The examples above use versions of the API that convert the audio data to a consistent format (32-bit signed PCM, in this case), but you can still output the audio data in its internal format (see notes below for supported formats): ```c size_t framesRead = drwav_read_pcm_frames(&wav, wav.totalPCMFrameCount, pDecodedInterleavedPCMFrames); ``` You can also read the raw bytes of audio data, which could be useful if dr_wav does not have native support for a particular data format: ```c size_t bytesRead = drwav_read_raw(&wav, bytesToRead, pRawDataBuffer); ``` dr_wav can also be used to output WAV files. 
This does not currently support compressed formats. To use this, look at `drwav_init_write()`, `drwav_init_file_write()`, etc. Use `drwav_write_pcm_frames()` to write samples, or `drwav_write_raw()` to write raw data in the "data" chunk. ```c drwav_data_format format; format.container = drwav_container_riff; // <-- drwav_container_riff = normal WAV files, drwav_container_w64 = Sony Wave64. format.format = DR_WAVE_FORMAT_PCM; // <-- Any of the DR_WAVE_FORMAT_* codes. format.channels = 2; format.sampleRate = 44100; format.bitsPerSample = 16; drwav_init_file_write(&wav, "data/recording.wav", &format, NULL); ... drwav_uint64 framesWritten = drwav_write_pcm_frames(pWav, frameCount, pSamples); ``` Note that writing to AIFF or RIFX is not supported. dr_wav has support for decoding from a number of different encapsulation formats. See below for details. Build Options ============= #define these options before including this file. #define DR_WAV_NO_CONVERSION_API Disables conversion APIs such as `drwav_read_pcm_frames_f32()` and `drwav_s16_to_f32()`. #define DR_WAV_NO_STDIO Disables APIs that initialize a decoder from a file such as `drwav_init_file()`, `drwav_init_file_write()`, etc. #define DR_WAV_NO_WCHAR Disables all functions ending with `_w`. Use this if your compiler does not provide wchar.h. Not required if DR_WAV_NO_STDIO is also defined. Supported Encapsulations ======================== - RIFF (Regular WAV) - RIFX (Big-Endian) - AIFF (Does not currently support ADPCM) - RF64 - W64 Note that AIFF and RIFX do not support write mode, nor do they support reading of metadata. Supported Encodings =================== - Unsigned 8-bit PCM - Signed 12-bit PCM - Signed 16-bit PCM - Signed 24-bit PCM - Signed 32-bit PCM - IEEE 32-bit floating point - IEEE 64-bit floating point - A-law and u-law - Microsoft ADPCM - IMA ADPCM (DVI, format code 0x11) 8-bit PCM encodings are always assumed to be unsigned. Signed 8-bit encoding can only be read with `drwav_read_raw()`. 
Note that ADPCM is not currently supported with AIFF. Contributions welcome. Notes ===== - Samples are always interleaved. - The default read function does not do any data conversion. Use `drwav_read_pcm_frames_f32()`, `drwav_read_pcm_frames_s32()` and `drwav_read_pcm_frames_s16()` to read and convert audio data to 32-bit floating point, signed 32-bit integer and signed 16-bit integer samples respectively. - dr_wav will try to read the WAV file as best it can, even if it's not strictly conformant to the WAV format. */ #ifndef dr_wav_h #define dr_wav_h #ifdef __cplusplus extern "C" { #endif #define DRWAV_STRINGIFY(x) #x #define DRWAV_XSTRINGIFY(x) DRWAV_STRINGIFY(x) #define DRWAV_VERSION_MAJOR 0 #define DRWAV_VERSION_MINOR 13 #define DRWAV_VERSION_REVISION 14 #define DRWAV_VERSION_STRING DRWAV_XSTRINGIFY(DRWAV_VERSION_MAJOR) "." DRWAV_XSTRINGIFY(DRWAV_VERSION_MINOR) "." DRWAV_XSTRINGIFY(DRWAV_VERSION_REVISION) #include <stddef.h> /* For size_t. */ /* Sized Types */ typedef signed char drwav_int8; typedef unsigned char drwav_uint8; typedef signed short drwav_int16; typedef unsigned short drwav_uint16; typedef signed int drwav_int32; typedef unsigned int drwav_uint32; #if defined(_MSC_VER) && !defined(__clang__) typedef signed __int64 drwav_int64; typedef unsigned __int64 drwav_uint64; #else #if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wlong-long" #if defined(__clang__) #pragma GCC diagnostic ignored "-Wc++11-long-long" #endif #endif typedef signed long long drwav_int64; typedef unsigned long long drwav_uint64; #if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) #pragma GCC diagnostic pop #endif #endif #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(_M_ARM64) || 
defined(__powerpc64__) typedef drwav_uint64 drwav_uintptr; #else typedef drwav_uint32 drwav_uintptr; #endif typedef drwav_uint8 drwav_bool8; typedef drwav_uint32 drwav_bool32; #define DRWAV_TRUE 1 #define DRWAV_FALSE 0 /* End Sized Types */ /* Decorations */ #if !defined(DRWAV_API) #if defined(DRWAV_DLL) #if defined(_WIN32) #define DRWAV_DLL_IMPORT __declspec(dllimport) #define DRWAV_DLL_EXPORT __declspec(dllexport) #define DRWAV_DLL_PRIVATE static #else #if defined(__GNUC__) && __GNUC__ >= 4 #define DRWAV_DLL_IMPORT __attribute__((visibility("default"))) #define DRWAV_DLL_EXPORT __attribute__((visibility("default"))) #define DRWAV_DLL_PRIVATE __attribute__((visibility("hidden"))) #else #define DRWAV_DLL_IMPORT #define DRWAV_DLL_EXPORT #define DRWAV_DLL_PRIVATE static #endif #endif #if defined(DR_WAV_IMPLEMENTATION) || defined(DRWAV_IMPLEMENTATION) #define DRWAV_API DRWAV_DLL_EXPORT #else #define DRWAV_API DRWAV_DLL_IMPORT #endif #define DRWAV_PRIVATE DRWAV_DLL_PRIVATE #else #define DRWAV_API extern #define DRWAV_PRIVATE static #endif #endif /* End Decorations */ /* Result Codes */ typedef drwav_int32 drwav_result; #define DRWAV_SUCCESS 0 #define DRWAV_ERROR -1 /* A generic error. 
*/ #define DRWAV_INVALID_ARGS -2 #define DRWAV_INVALID_OPERATION -3 #define DRWAV_OUT_OF_MEMORY -4 #define DRWAV_OUT_OF_RANGE -5 #define DRWAV_ACCESS_DENIED -6 #define DRWAV_DOES_NOT_EXIST -7 #define DRWAV_ALREADY_EXISTS -8 #define DRWAV_TOO_MANY_OPEN_FILES -9 #define DRWAV_INVALID_FILE -10 #define DRWAV_TOO_BIG -11 #define DRWAV_PATH_TOO_LONG -12 #define DRWAV_NAME_TOO_LONG -13 #define DRWAV_NOT_DIRECTORY -14 #define DRWAV_IS_DIRECTORY -15 #define DRWAV_DIRECTORY_NOT_EMPTY -16 #define DRWAV_END_OF_FILE -17 #define DRWAV_NO_SPACE -18 #define DRWAV_BUSY -19 #define DRWAV_IO_ERROR -20 #define DRWAV_INTERRUPT -21 #define DRWAV_UNAVAILABLE -22 #define DRWAV_ALREADY_IN_USE -23 #define DRWAV_BAD_ADDRESS -24 #define DRWAV_BAD_SEEK -25 #define DRWAV_BAD_PIPE -26 #define DRWAV_DEADLOCK -27 #define DRWAV_TOO_MANY_LINKS -28 #define DRWAV_NOT_IMPLEMENTED -29 #define DRWAV_NO_MESSAGE -30 #define DRWAV_BAD_MESSAGE -31 #define DRWAV_NO_DATA_AVAILABLE -32 #define DRWAV_INVALID_DATA -33 #define DRWAV_TIMEOUT -34 #define DRWAV_NO_NETWORK -35 #define DRWAV_NOT_UNIQUE -36 #define DRWAV_NOT_SOCKET -37 #define DRWAV_NO_ADDRESS -38 #define DRWAV_BAD_PROTOCOL -39 #define DRWAV_PROTOCOL_UNAVAILABLE -40 #define DRWAV_PROTOCOL_NOT_SUPPORTED -41 #define DRWAV_PROTOCOL_FAMILY_NOT_SUPPORTED -42 #define DRWAV_ADDRESS_FAMILY_NOT_SUPPORTED -43 #define DRWAV_SOCKET_NOT_SUPPORTED -44 #define DRWAV_CONNECTION_RESET -45 #define DRWAV_ALREADY_CONNECTED -46 #define DRWAV_NOT_CONNECTED -47 #define DRWAV_CONNECTION_REFUSED -48 #define DRWAV_NO_HOST -49 #define DRWAV_IN_PROGRESS -50 #define DRWAV_CANCELLED -51 #define DRWAV_MEMORY_ALREADY_MAPPED -52 #define DRWAV_AT_END -53 /* End Result Codes */ /* Common data formats. 
*/ #define DR_WAVE_FORMAT_PCM 0x1 #define DR_WAVE_FORMAT_ADPCM 0x2 #define DR_WAVE_FORMAT_IEEE_FLOAT 0x3 #define DR_WAVE_FORMAT_ALAW 0x6 #define DR_WAVE_FORMAT_MULAW 0x7 #define DR_WAVE_FORMAT_DVI_ADPCM 0x11 #define DR_WAVE_FORMAT_EXTENSIBLE 0xFFFE /* Flags to pass into drwav_init_ex(), etc. */ #define DRWAV_SEQUENTIAL 0x00000001 #define DRWAV_WITH_METADATA 0x00000002 DRWAV_API void drwav_version(drwav_uint32* pMajor, drwav_uint32* pMinor, drwav_uint32* pRevision); DRWAV_API const char* drwav_version_string(void); /* Allocation Callbacks */ typedef struct { void* pUserData; void* (* onMalloc)(size_t sz, void* pUserData); void* (* onRealloc)(void* p, size_t sz, void* pUserData); void (* onFree)(void* p, void* pUserData); } drwav_allocation_callbacks; /* End Allocation Callbacks */ typedef enum { drwav_seek_origin_start, drwav_seek_origin_current } drwav_seek_origin; typedef enum { drwav_container_riff, drwav_container_rifx, drwav_container_w64, drwav_container_rf64, drwav_container_aiff } drwav_container; typedef struct { union { drwav_uint8 fourcc[4]; drwav_uint8 guid[16]; } id; /* The size in bytes of the chunk. */ drwav_uint64 sizeInBytes; /* RIFF = 2 byte alignment. W64 = 8 byte alignment. */ unsigned int paddingSize; } drwav_chunk_header; typedef struct { /* The format tag exactly as specified in the wave file's "fmt" chunk. This can be used by applications that require support for data formats not natively supported by dr_wav. */ drwav_uint16 formatTag; /* The number of channels making up the audio data. When this is set to 1 it is mono, 2 is stereo, etc. */ drwav_uint16 channels; /* The sample rate. Usually set to something like 44100. */ drwav_uint32 sampleRate; /* Average bytes per second. You probably don't need this, but it's left here for informational purposes. */ drwav_uint32 avgBytesPerSec; /* Block align. This is equal to the number of channels * bytes per sample. */ drwav_uint16 blockAlign; /* Bits per sample. 
*/ drwav_uint16 bitsPerSample; /* The size of the extended data. Only used internally for validation, but left here for informational purposes. */ drwav_uint16 extendedSize; /* The number of valid bits per sample. When <formatTag> is equal to WAVE_FORMAT_EXTENSIBLE, <bitsPerSample> is always rounded up to the nearest multiple of 8. This variable contains information about exactly how many bits are valid per sample. Mainly used for informational purposes. */ drwav_uint16 validBitsPerSample; /* The channel mask. Not used at the moment. */ drwav_uint32 channelMask; /* The sub-format, exactly as specified by the wave file. */ drwav_uint8 subFormat[16]; } drwav_fmt; DRWAV_API drwav_uint16 drwav_fmt_get_format(const drwav_fmt* pFMT); /* Callback for when data is read. Return value is the number of bytes actually read. pUserData [in] The user data that was passed to drwav_init() and family. pBufferOut [out] The output buffer. bytesToRead [in] The number of bytes to read. Returns the number of bytes actually read. A return value of less than bytesToRead indicates the end of the stream. Do _not_ return from this callback until either the entire bytesToRead is filled or you have reached the end of the stream. */ typedef size_t (* drwav_read_proc)(void* pUserData, void* pBufferOut, size_t bytesToRead); /* Callback for when data is written. Returns value is the number of bytes actually written. pUserData [in] The user data that was passed to drwav_init_write() and family. pData [out] A pointer to the data to write. bytesToWrite [in] The number of bytes to write. Returns the number of bytes actually written. If the return value differs from bytesToWrite, it indicates an error. */ typedef size_t (* drwav_write_proc)(void* pUserData, const void* pData, size_t bytesToWrite); /* Callback for when data needs to be seeked. pUserData [in] The user data that was passed to drwav_init() and family. offset [in] The number of bytes to move, relative to the origin. Will never be negative. 
origin [in] The origin of the seek - the current position or the start of the stream. Returns whether or not the seek was successful. Whether or not it is relative to the beginning or current position is determined by the "origin" parameter which will be either drwav_seek_origin_start or drwav_seek_origin_current. */ typedef drwav_bool32 (* drwav_seek_proc)(void* pUserData, int offset, drwav_seek_origin origin); /* Callback for when drwav_init_ex() finds a chunk. pChunkUserData [in] The user data that was passed to the pChunkUserData parameter of drwav_init_ex() and family. onRead [in] A pointer to the function to call when reading. onSeek [in] A pointer to the function to call when seeking. pReadSeekUserData [in] The user data that was passed to the pReadSeekUserData parameter of drwav_init_ex() and family. pChunkHeader [in] A pointer to an object containing basic header information about the chunk. Use this to identify the chunk. container [in] Whether or not the WAV file is a RIFF or Wave64 container. If you're unsure of the difference, assume RIFF. pFMT [in] A pointer to the object containing the contents of the "fmt" chunk. Returns the number of bytes read + seeked. To read data from the chunk, call onRead(), passing in pReadSeekUserData as the first parameter. Do the same for seeking with onSeek(). The return value must be the total number of bytes you have read _plus_ seeked. Use the `container` argument to discriminate the fields in `pChunkHeader->id`. If the container is `drwav_container_riff` or `drwav_container_rf64` you should use `id.fourcc`, otherwise you should use `id.guid`. The `pFMT` parameter can be used to determine the data format of the wave file. Use `drwav_fmt_get_format()` to get the sample format, which will be one of the `DR_WAVE_FORMAT_*` identifiers. The read pointer will be sitting on the first byte after the chunk's header. You must not attempt to read beyond the boundary of the chunk. 
*/ typedef drwav_uint64 (* drwav_chunk_proc)(void* pChunkUserData, drwav_read_proc onRead, drwav_seek_proc onSeek, void* pReadSeekUserData, const drwav_chunk_header* pChunkHeader, drwav_container container, const drwav_fmt* pFMT); /* Structure for internal use. Only used for loaders opened with drwav_init_memory(). */ typedef struct { const drwav_uint8* data; size_t dataSize; size_t currentReadPos; } drwav__memory_stream; /* Structure for internal use. Only used for writers opened with drwav_init_memory_write(). */ typedef struct { void** ppData; size_t* pDataSize; size_t dataSize; size_t dataCapacity; size_t currentWritePos; } drwav__memory_stream_write; typedef struct { drwav_container container; /* RIFF, W64. */ drwav_uint32 format; /* DR_WAVE_FORMAT_* */ drwav_uint32 channels; drwav_uint32 sampleRate; drwav_uint32 bitsPerSample; } drwav_data_format; typedef enum { drwav_metadata_type_none = 0, /* Unknown simply means a chunk that drwav does not handle specifically. You can still ask to receive these chunks as metadata objects. It is then up to you to interpret the chunk's data. You can also write unknown metadata to a wav file. Be careful writing unknown chunks if you have also edited the audio data. The unknown chunks could represent offsets/sizes that no longer correctly correspond to the audio data. */ drwav_metadata_type_unknown = 1 << 0, /* Only 1 of each of these metadata items are allowed in a wav file. */ drwav_metadata_type_smpl = 1 << 1, drwav_metadata_type_inst = 1 << 2, drwav_metadata_type_cue = 1 << 3, drwav_metadata_type_acid = 1 << 4, drwav_metadata_type_bext = 1 << 5, /* Wav files often have a LIST chunk. This is a chunk that contains a set of subchunks. For this higher-level metadata API, we don't make a distinction between a regular chunk and a LIST subchunk. Instead, they are all just 'metadata' items. There can be multiple of these metadata items in a wav file. 
*/ drwav_metadata_type_list_label = 1 << 6, drwav_metadata_type_list_note = 1 << 7, drwav_metadata_type_list_labelled_cue_region = 1 << 8, drwav_metadata_type_list_info_software = 1 << 9, drwav_metadata_type_list_info_copyright = 1 << 10, drwav_metadata_type_list_info_title = 1 << 11, drwav_metadata_type_list_info_artist = 1 << 12, drwav_metadata_type_list_info_comment = 1 << 13, drwav_metadata_type_list_info_date = 1 << 14, drwav_metadata_type_list_info_genre = 1 << 15, drwav_metadata_type_list_info_album = 1 << 16, drwav_metadata_type_list_info_tracknumber = 1 << 17, /* Other type constants for convenience. */ drwav_metadata_type_list_all_info_strings = drwav_metadata_type_list_info_software | drwav_metadata_type_list_info_copyright | drwav_metadata_type_list_info_title | drwav_metadata_type_list_info_artist | drwav_metadata_type_list_info_comment | drwav_metadata_type_list_info_date | drwav_metadata_type_list_info_genre | drwav_metadata_type_list_info_album | drwav_metadata_type_list_info_tracknumber, drwav_metadata_type_list_all_adtl = drwav_metadata_type_list_label | drwav_metadata_type_list_note | drwav_metadata_type_list_labelled_cue_region, drwav_metadata_type_all = -2, /*0xFFFFFFFF & ~drwav_metadata_type_unknown,*/ drwav_metadata_type_all_including_unknown = -1 /*0xFFFFFFFF,*/ } drwav_metadata_type; /* Sampler Metadata The sampler chunk contains information about how a sound should be played in the context of a whole audio production, and when used in a sampler. See https://en.wikipedia.org/wiki/Sample-based_synthesis. */ typedef enum { drwav_smpl_loop_type_forward = 0, drwav_smpl_loop_type_pingpong = 1, drwav_smpl_loop_type_backward = 2 } drwav_smpl_loop_type; typedef struct { /* The ID of the associated cue point, see drwav_cue and drwav_cue_point. As with all cue point IDs, this can correspond to a label chunk to give this loop a name, see drwav_list_label_or_note. */ drwav_uint32 cuePointId; /* See drwav_smpl_loop_type. 
*/ drwav_uint32 type; /* The byte offset of the first sample to be played in the loop. */ drwav_uint32 firstSampleByteOffset; /* The byte offset into the audio data of the last sample to be played in the loop. */ drwav_uint32 lastSampleByteOffset; /* A value to represent that playback should occur at a point between samples. This value ranges from 0 to UINT32_MAX. Where a value of 0 means no fraction, and a value of (UINT32_MAX / 2) would mean half a sample. */ drwav_uint32 sampleFraction; /* Number of times to play the loop. 0 means loop infinitely. */ drwav_uint32 playCount; } drwav_smpl_loop; typedef struct { /* IDs for a particular MIDI manufacturer. 0 if not used. */ drwav_uint32 manufacturerId; drwav_uint32 productId; /* The period of 1 sample in nanoseconds. */ drwav_uint32 samplePeriodNanoseconds; /* The MIDI root note of this file. 0 to 127. */ drwav_uint32 midiUnityNote; /* The fraction of a semitone up from the given MIDI note. This is a value from 0 to UINT32_MAX, where 0 means no change and (UINT32_MAX / 2) is half a semitone (AKA 50 cents). */ drwav_uint32 midiPitchFraction; /* Data relating to SMPTE standards which are used for syncing audio and video. 0 if not used. */ drwav_uint32 smpteFormat; drwav_uint32 smpteOffset; /* drwav_smpl_loop loops. */ drwav_uint32 sampleLoopCount; /* Optional sampler-specific data. */ drwav_uint32 samplerSpecificDataSizeInBytes; drwav_smpl_loop* pLoops; drwav_uint8* pSamplerSpecificData; } drwav_smpl; /* Instrument Metadata The inst metadata contains data about how a sound should be played as part of an instrument. This commonly read by samplers. See https://en.wikipedia.org/wiki/Sample-based_synthesis. */ typedef struct { drwav_int8 midiUnityNote; /* The root note of the audio as a MIDI note number. 0 to 127. 
*/ drwav_int8 fineTuneCents; /* -50 to +50 */ drwav_int8 gainDecibels; /* -64 to +64 */ drwav_int8 lowNote; /* 0 to 127 */ drwav_int8 highNote; /* 0 to 127 */ drwav_int8 lowVelocity; /* 1 to 127 */ drwav_int8 highVelocity; /* 1 to 127 */ } drwav_inst; /* Cue Metadata Cue points are markers at specific points in the audio. They often come with an associated piece of drwav_list_label_or_note metadata which contains the text for the marker. */ typedef struct { /* Unique identification value. */ drwav_uint32 id; /* Set to 0. This is only relevant if there is a 'playlist' chunk - which is not supported by dr_wav. */ drwav_uint32 playOrderPosition; /* Should always be "data". This represents the fourcc value of the chunk that this cue point corresponds to. dr_wav only supports a single data chunk so this should always be "data". */ drwav_uint8 dataChunkId[4]; /* Set to 0. This is only relevant if there is a wave list chunk. dr_wav, like lots of readers/writers, do not support this. */ drwav_uint32 chunkStart; /* Set to 0 for uncompressed formats. Else the last byte in compressed wave data where decompression can begin to find the value of the corresponding sample value. */ drwav_uint32 blockStart; /* For uncompressed formats this is the byte offset of the cue point into the audio data. For compressed formats this is relative to the block specified with blockStart. */ drwav_uint32 sampleByteOffset; } drwav_cue_point; typedef struct { drwav_uint32 cuePointCount; drwav_cue_point *pCuePoints; } drwav_cue; /* Acid Metadata This chunk contains some information about the time signature and the tempo of the audio. */ typedef enum { drwav_acid_flag_one_shot = 1, /* If this is not set, then it is a loop instead of a one-shot. */ drwav_acid_flag_root_note_set = 2, drwav_acid_flag_stretch = 4, drwav_acid_flag_disk_based = 8, drwav_acid_flag_acidizer = 16 /* Not sure what this means. */ } drwav_acid_flag; typedef struct { /* A bit-field, see drwav_acid_flag. 
*/ drwav_uint32 flags; /* Valid if flags contains drwav_acid_flag_root_note_set. It represents the MIDI root note the file - a value from 0 to 127. */ drwav_uint16 midiUnityNote; /* Reserved values that should probably be ignored. reserved1 seems to often be 128 and reserved2 is 0. */ drwav_uint16 reserved1; float reserved2; /* Number of beats. */ drwav_uint32 numBeats; /* The time signature of the audio. */ drwav_uint16 meterDenominator; drwav_uint16 meterNumerator; /* Beats per minute of the track. Setting a value of 0 suggests that there is no tempo. */ float tempo; } drwav_acid; /* Cue Label or Note metadata These are 2 different types of metadata, but they have the exact same format. Labels tend to be the more common and represent a short name for a cue point. Notes might be used to represent a longer comment. */ typedef struct { /* The ID of a cue point that this label or note corresponds to. */ drwav_uint32 cuePointId; /* Size of the string not including any null terminator. */ drwav_uint32 stringLength; /* The string. The *init_with_metadata functions null terminate this for convenience. */ char* pString; } drwav_list_label_or_note; /* BEXT metadata, also known as Broadcast Wave Format (BWF) This metadata adds some extra description to an audio file. You must check the version field to determine if the UMID or the loudness fields are valid. */ typedef struct { /* These top 3 fields, and the umid field are actually defined in the standard as a statically sized buffers. In order to reduce the size of this struct (and therefore the union in the metadata struct), we instead store these as pointers. */ char* pDescription; /* Can be NULL or a null-terminated string, must be <= 256 characters. */ char* pOriginatorName; /* Can be NULL or a null-terminated string, must be <= 32 characters. */ char* pOriginatorReference; /* Can be NULL or a null-terminated string, must be <= 32 characters. */ char pOriginationDate[10]; /* ASCII "yyyy:mm:dd". 
*/ char pOriginationTime[8]; /* ASCII "hh:mm:ss". */ drwav_uint64 timeReference; /* First sample count since midnight. */ drwav_uint16 version; /* Version of the BWF, check this to see if the fields below are valid. */ /* Unrestricted ASCII characters containing a collection of strings terminated by CR/LF. Each string shall contain a description of a coding process applied to the audio data. */ char* pCodingHistory; drwav_uint32 codingHistorySize; /* Fields below this point are only valid if the version is 1 or above. */ drwav_uint8* pUMID; /* Exactly 64 bytes of SMPTE UMID */ /* Fields below this point are only valid if the version is 2 or above. */ drwav_uint16 loudnessValue; /* Integrated Loudness Value of the file in LUFS (multiplied by 100). */ drwav_uint16 loudnessRange; /* Loudness Range of the file in LU (multiplied by 100). */ drwav_uint16 maxTruePeakLevel; /* Maximum True Peak Level of the file expressed as dBTP (multiplied by 100). */ drwav_uint16 maxMomentaryLoudness; /* Highest value of the Momentary Loudness Level of the file in LUFS (multiplied by 100). */ drwav_uint16 maxShortTermLoudness; /* Highest value of the Short-Term Loudness Level of the file in LUFS (multiplied by 100). */ } drwav_bext; /* Info Text Metadata There are many different types of information text that can be saved in this format. This is where things like the album name, the artists, the year it was produced, etc are saved. See drwav_metadata_type for the full list of types that dr_wav supports. */ typedef struct { /* Size of the string not including any null terminator. */ drwav_uint32 stringLength; /* The string. The *init_with_metadata functions null terminate this for convenience. */ char* pString; } drwav_list_info_text; /* Labelled Cue Region Metadata The labelled cue region metadata is used to associate some region of audio with text. The region starts at a cue point, and extends for the given number of samples. 
*/ typedef struct { /* The ID of a cue point that this object corresponds to. */ drwav_uint32 cuePointId; /* The number of samples from the cue point forwards that should be considered this region */ drwav_uint32 sampleLength; /* Four characters used to say what the purpose of this region is. */ drwav_uint8 purposeId[4]; /* Unsure of the exact meanings of these. It appears to be acceptable to set them all to 0. */ drwav_uint16 country; drwav_uint16 language; drwav_uint16 dialect; drwav_uint16 codePage; /* Size of the string not including any null terminator. */ drwav_uint32 stringLength; /* The string. The *init_with_metadata functions null terminate this for convenience. */ char* pString; } drwav_list_labelled_cue_region; /* Unknown Metadata This chunk just represents a type of chunk that dr_wav does not understand. Unknown metadata has a location attached to it. This is because wav files can have a LIST chunk that contains subchunks. These LIST chunks can be one of two types. An adtl list, or an INFO list. This enum is used to specify the location of a chunk that dr_wav currently doesn't support. */ typedef enum { drwav_metadata_location_invalid, drwav_metadata_location_top_level, drwav_metadata_location_inside_info_list, drwav_metadata_location_inside_adtl_list } drwav_metadata_location; typedef struct { drwav_uint8 id[4]; drwav_metadata_location chunkLocation; drwav_uint32 dataSizeInBytes; drwav_uint8* pData; } drwav_unknown_metadata; /* Metadata is saved as a union of all the supported types. */ typedef struct { /* Determines which item in the union is valid. */ drwav_metadata_type type; union { drwav_cue cue; drwav_smpl smpl; drwav_acid acid; drwav_inst inst; drwav_bext bext; drwav_list_label_or_note labelOrNote; /* List label or list note. */ drwav_list_labelled_cue_region labelledCueRegion; drwav_list_info_text infoText; /* Any of the list info types. 
*/ drwav_unknown_metadata unknown; } data; } drwav_metadata; typedef struct { /* A pointer to the function to call when more data is needed. */ drwav_read_proc onRead; /* A pointer to the function to call when data needs to be written. Only used when the drwav object is opened in write mode. */ drwav_write_proc onWrite; /* A pointer to the function to call when the wav file needs to be seeked. */ drwav_seek_proc onSeek; /* The user data to pass to callbacks. */ void* pUserData; /* Allocation callbacks. */ drwav_allocation_callbacks allocationCallbacks; /* Whether or not the WAV file is formatted as a standard RIFF file or W64. */ drwav_container container; /* Structure containing format information exactly as specified by the wav file. */ drwav_fmt fmt; /* The sample rate. Will be set to something like 44100. */ drwav_uint32 sampleRate; /* The number of channels. This will be set to 1 for monaural streams, 2 for stereo, etc. */ drwav_uint16 channels; /* The bits per sample. Will be set to something like 16, 24, etc. */ drwav_uint16 bitsPerSample; /* Equal to fmt.formatTag, or the value specified by fmt.subFormat if fmt.formatTag is equal to 65534 (WAVE_FORMAT_EXTENSIBLE). */ drwav_uint16 translatedFormatTag; /* The total number of PCM frames making up the audio data. */ drwav_uint64 totalPCMFrameCount; /* The size in bytes of the data chunk. */ drwav_uint64 dataChunkDataSize; /* The position in the stream of the first data byte of the data chunk. This is used for seeking. */ drwav_uint64 dataChunkDataPos; /* The number of bytes remaining in the data chunk. */ drwav_uint64 bytesRemaining; /* The current read position in PCM frames. */ drwav_uint64 readCursorInPCMFrames; /* Only used in sequential write mode. Keeps track of the desired size of the "data" chunk at the point of initialization time. Always set to 0 for non-sequential writes and when the drwav object is opened in read mode. Used for validation. 
*/ drwav_uint64 dataChunkDataSizeTargetWrite; /* Keeps track of whether or not the wav writer was initialized in sequential mode. */ drwav_bool32 isSequentialWrite; /* A array of metadata. This is valid after the *init_with_metadata call returns. It will be valid until drwav_uninit() is called. You can take ownership of this data with drwav_take_ownership_of_metadata(). */ drwav_metadata* pMetadata; drwav_uint32 metadataCount; /* A hack to avoid a DRWAV_MALLOC() when opening a decoder with drwav_init_memory(). */ drwav__memory_stream memoryStream; drwav__memory_stream_write memoryStreamWrite; /* Microsoft ADPCM specific data. */ struct { drwav_uint32 bytesRemainingInBlock; drwav_uint16 predictor[2]; drwav_int32 delta[2]; drwav_int32 cachedFrames[4]; /* Samples are stored in this cache during decoding. */ drwav_uint32 cachedFrameCount; drwav_int32 prevFrames[2][2]; /* The previous 2 samples for each channel (2 channels at most). */ } msadpcm; /* IMA ADPCM specific data. */ struct { drwav_uint32 bytesRemainingInBlock; drwav_int32 predictor[2]; drwav_int32 stepIndex[2]; drwav_int32 cachedFrames[16]; /* Samples are stored in this cache during decoding. */ drwav_uint32 cachedFrameCount; } ima; /* AIFF specific data. */ struct { drwav_bool8 isLE; /* Will be set to true if the audio data is little-endian encoded. */ drwav_bool8 isUnsigned; /* Only used for 8-bit samples. When set to true, will be treated as unsigned. */ } aiff; } drwav; /* Initializes a pre-allocated drwav object for reading. pWav [out] A pointer to the drwav object being initialized. onRead [in] The function to call when data needs to be read from the client. onSeek [in] The function to call when the read position of the client data needs to move. onChunk [in, optional] The function to call when a chunk is enumerated at initialized time. pUserData, pReadSeekUserData [in, optional] A pointer to application defined data that will be passed to onRead and onSeek. 
pChunkUserData [in, optional] A pointer to application defined data that will be passed to onChunk. flags [in, optional] A set of flags for controlling how things are loaded. Returns true if successful; false otherwise. Close the loader with drwav_uninit(). This is the lowest level function for initializing a WAV file. You can also use drwav_init_file() and drwav_init_memory() to open the stream from a file or from a block of memory respectively. Possible values for flags: DRWAV_SEQUENTIAL: Never perform a backwards seek while loading. This disables the chunk callback and will cause this function to return as soon as the data chunk is found. Any chunks after the data chunk will be ignored. drwav_init() is equivalent to "drwav_init_ex(pWav, onRead, onSeek, NULL, pUserData, NULL, 0);". The onChunk callback is not called for the WAVE or FMT chunks. The contents of the FMT chunk can be read from pWav->fmt after the function returns. See also: drwav_init_file(), drwav_init_memory(), drwav_uninit() */ DRWAV_API drwav_bool32 drwav_init(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, drwav_chunk_proc onChunk, void* pReadSeekUserData, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API drwav_bool32 drwav_init_with_metadata(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks); /* Initializes a pre-allocated drwav object for writing. onWrite [in] The function to call when data needs to be written. onSeek [in] The function to call when the write position needs to move. pUserData [in, optional] A pointer to application defined data that will be passed to onWrite and onSeek. 
metadata, numMetadata [in, optional] An array of metadata objects that should be written to the file. The array is not edited. You are responsible for this metadata memory and it must remain valid until drwav_uninit() is called. Returns true if successful; false otherwise. Close the writer with drwav_uninit(). This is the lowest level function for initializing a WAV file. You can also use drwav_init_file_write() and drwav_init_memory_write() to open the stream from a file or from a block of memory respectively. If the total sample count is known, you can use drwav_init_write_sequential(). This avoids the need for dr_wav to perform a post-processing step for storing the total sample count and the size of the data chunk which requires a backwards seek. See also: drwav_init_file_write(), drwav_init_memory_write(), drwav_uninit() */ DRWAV_API drwav_bool32 drwav_init_write(drwav* pWav, const drwav_data_format* pFormat, drwav_write_proc onWrite, drwav_seek_proc onSeek, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API drwav_bool32 drwav_init_write_sequential(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_write_proc onWrite, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API drwav_bool32 drwav_init_write_sequential_pcm_frames(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, drwav_write_proc onWrite, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API drwav_bool32 drwav_init_write_with_metadata(drwav* pWav, const drwav_data_format* pFormat, drwav_write_proc onWrite, drwav_seek_proc onSeek, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks, drwav_metadata* pMetadata, drwav_uint32 metadataCount); /* Utility function to determine the target size of the entire data to be written (including all headers and chunks). Returns the target size in bytes. 
The metadata argument can be NULL meaning no metadata exists. Useful if the application needs to know the size to allocate. Only writing to the RIFF chunk and one data chunk is currently supported. See also: drwav_init_write(), drwav_init_file_write(), drwav_init_memory_write() */ DRWAV_API drwav_uint64 drwav_target_write_size_bytes(const drwav_data_format* pFormat, drwav_uint64 totalFrameCount, drwav_metadata* pMetadata, drwav_uint32 metadataCount); /* Take ownership of the metadata objects that were allocated via one of the init_with_metadata() function calls. The init_with_metadata functions perform a single heap allocation for this metadata. Useful if you want the data to persist beyond the lifetime of the drwav object. You must free the data returned from this function using drwav_free(). */ DRWAV_API drwav_metadata* drwav_take_ownership_of_metadata(drwav* pWav); /* Uninitializes the given drwav object. Use this only for objects initialized with drwav_init*() functions (drwav_init(), drwav_init_ex(), drwav_init_write(), drwav_init_write_sequential()). */ DRWAV_API drwav_result drwav_uninit(drwav* pWav); /* Reads raw audio data. This is the lowest level function for reading audio data. It simply reads the given number of bytes of the raw internal sample data. Consider using drwav_read_pcm_frames_s16(), drwav_read_pcm_frames_s32() or drwav_read_pcm_frames_f32() for reading sample data in a consistent format. pBufferOut can be NULL in which case a seek will be performed. Returns the number of bytes actually read. */ DRWAV_API size_t drwav_read_raw(drwav* pWav, size_t bytesToRead, void* pBufferOut); /* Reads up to the specified number of PCM frames from the WAV file. The output data will be in the file's internal format, converted to native-endian byte order. Use drwav_read_pcm_frames_s16/f32/s32() to read data in a specific format. 
If the return value is less than <framesToRead> it means the end of the file has been reached or you have requested more PCM frames than can possibly fit in the output buffer. This function will only work when sample data is of a fixed size and uncompressed. If you are using a compressed format consider using drwav_read_raw() or drwav_read_pcm_frames_s16/s32/f32(). pBufferOut can be NULL in which case a seek will be performed. */ DRWAV_API drwav_uint64 drwav_read_pcm_frames(drwav* pWav, drwav_uint64 framesToRead, void* pBufferOut); DRWAV_API drwav_uint64 drwav_read_pcm_frames_le(drwav* pWav, drwav_uint64 framesToRead, void* pBufferOut); DRWAV_API drwav_uint64 drwav_read_pcm_frames_be(drwav* pWav, drwav_uint64 framesToRead, void* pBufferOut); /* Seeks to the given PCM frame. Returns true if successful; false otherwise. */ DRWAV_API drwav_bool32 drwav_seek_to_pcm_frame(drwav* pWav, drwav_uint64 targetFrameIndex); /* Retrieves the current read position in pcm frames. */ DRWAV_API drwav_result drwav_get_cursor_in_pcm_frames(drwav* pWav, drwav_uint64* pCursor); /* Retrieves the length of the file. */ DRWAV_API drwav_result drwav_get_length_in_pcm_frames(drwav* pWav, drwav_uint64* pLength); /* Writes raw audio data. Returns the number of bytes actually written. If this differs from bytesToWrite, it indicates an error. */ DRWAV_API size_t drwav_write_raw(drwav* pWav, size_t bytesToWrite, const void* pData); /* Writes PCM frames. Returns the number of PCM frames written. Input samples need to be in native-endian byte order. On big-endian architectures the input data will be converted to little-endian. Use drwav_write_raw() to write raw audio data without performing any conversion. 
*/ DRWAV_API drwav_uint64 drwav_write_pcm_frames(drwav* pWav, drwav_uint64 framesToWrite, const void* pData); DRWAV_API drwav_uint64 drwav_write_pcm_frames_le(drwav* pWav, drwav_uint64 framesToWrite, const void* pData); DRWAV_API drwav_uint64 drwav_write_pcm_frames_be(drwav* pWav, drwav_uint64 framesToWrite, const void* pData); /* Conversion Utilities */ #ifndef DR_WAV_NO_CONVERSION_API /* Reads a chunk of audio data and converts it to signed 16-bit PCM samples. pBufferOut can be NULL in which case a seek will be performed. Returns the number of PCM frames actually read. If the return value is less than <framesToRead> it means the end of the file has been reached. */ DRWAV_API drwav_uint64 drwav_read_pcm_frames_s16(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut); DRWAV_API drwav_uint64 drwav_read_pcm_frames_s16le(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut); DRWAV_API drwav_uint64 drwav_read_pcm_frames_s16be(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut); /* Low-level function for converting unsigned 8-bit PCM samples to signed 16-bit PCM samples. */ DRWAV_API void drwav_u8_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t sampleCount); /* Low-level function for converting signed 24-bit PCM samples to signed 16-bit PCM samples. */ DRWAV_API void drwav_s24_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t sampleCount); /* Low-level function for converting signed 32-bit PCM samples to signed 16-bit PCM samples. */ DRWAV_API void drwav_s32_to_s16(drwav_int16* pOut, const drwav_int32* pIn, size_t sampleCount); /* Low-level function for converting IEEE 32-bit floating point samples to signed 16-bit PCM samples. */ DRWAV_API void drwav_f32_to_s16(drwav_int16* pOut, const float* pIn, size_t sampleCount); /* Low-level function for converting IEEE 64-bit floating point samples to signed 16-bit PCM samples. 
*/ DRWAV_API void drwav_f64_to_s16(drwav_int16* pOut, const double* pIn, size_t sampleCount); /* Low-level function for converting A-law samples to signed 16-bit PCM samples. */ DRWAV_API void drwav_alaw_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t sampleCount); /* Low-level function for converting u-law samples to signed 16-bit PCM samples. */ DRWAV_API void drwav_mulaw_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t sampleCount); /* Reads a chunk of audio data and converts it to IEEE 32-bit floating point samples. pBufferOut can be NULL in which case a seek will be performed. Returns the number of PCM frames actually read. If the return value is less than <framesToRead> it means the end of the file has been reached. */ DRWAV_API drwav_uint64 drwav_read_pcm_frames_f32(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut); DRWAV_API drwav_uint64 drwav_read_pcm_frames_f32le(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut); DRWAV_API drwav_uint64 drwav_read_pcm_frames_f32be(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut); /* Low-level function for converting unsigned 8-bit PCM samples to IEEE 32-bit floating point samples. */ DRWAV_API void drwav_u8_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount); /* Low-level function for converting signed 16-bit PCM samples to IEEE 32-bit floating point samples. */ DRWAV_API void drwav_s16_to_f32(float* pOut, const drwav_int16* pIn, size_t sampleCount); /* Low-level function for converting signed 24-bit PCM samples to IEEE 32-bit floating point samples. */ DRWAV_API void drwav_s24_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount); /* Low-level function for converting signed 32-bit PCM samples to IEEE 32-bit floating point samples. */ DRWAV_API void drwav_s32_to_f32(float* pOut, const drwav_int32* pIn, size_t sampleCount); /* Low-level function for converting IEEE 64-bit floating point samples to IEEE 32-bit floating point samples. 
*/ DRWAV_API void drwav_f64_to_f32(float* pOut, const double* pIn, size_t sampleCount); /* Low-level function for converting A-law samples to IEEE 32-bit floating point samples. */ DRWAV_API void drwav_alaw_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount); /* Low-level function for converting u-law samples to IEEE 32-bit floating point samples. */ DRWAV_API void drwav_mulaw_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount); /* Reads a chunk of audio data and converts it to signed 32-bit PCM samples. pBufferOut can be NULL in which case a seek will be performed. Returns the number of PCM frames actually read. If the return value is less than <framesToRead> it means the end of the file has been reached. */ DRWAV_API drwav_uint64 drwav_read_pcm_frames_s32(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut); DRWAV_API drwav_uint64 drwav_read_pcm_frames_s32le(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut); DRWAV_API drwav_uint64 drwav_read_pcm_frames_s32be(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut); /* Low-level function for converting unsigned 8-bit PCM samples to signed 32-bit PCM samples. */ DRWAV_API void drwav_u8_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t sampleCount); /* Low-level function for converting signed 16-bit PCM samples to signed 32-bit PCM samples. */ DRWAV_API void drwav_s16_to_s32(drwav_int32* pOut, const drwav_int16* pIn, size_t sampleCount); /* Low-level function for converting signed 24-bit PCM samples to signed 32-bit PCM samples. */ DRWAV_API void drwav_s24_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t sampleCount); /* Low-level function for converting IEEE 32-bit floating point samples to signed 32-bit PCM samples. */ DRWAV_API void drwav_f32_to_s32(drwav_int32* pOut, const float* pIn, size_t sampleCount); /* Low-level function for converting IEEE 64-bit floating point samples to signed 32-bit PCM samples. 
*/ DRWAV_API void drwav_f64_to_s32(drwav_int32* pOut, const double* pIn, size_t sampleCount); /* Low-level function for converting A-law samples to signed 32-bit PCM samples. */ DRWAV_API void drwav_alaw_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t sampleCount); /* Low-level function for converting u-law samples to signed 32-bit PCM samples. */ DRWAV_API void drwav_mulaw_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t sampleCount); #endif /* DR_WAV_NO_CONVERSION_API */ /* High-Level Convenience Helpers */ #ifndef DR_WAV_NO_STDIO /* Helper for initializing a wave file for reading using stdio. This holds the internal FILE object until drwav_uninit() is called. Keep this in mind if you're caching drwav objects because the operating system may restrict the number of file handles an application can have open at any given time. */ DRWAV_API drwav_bool32 drwav_init_file(drwav* pWav, const char* filename, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API drwav_bool32 drwav_init_file_ex(drwav* pWav, const char* filename, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API drwav_bool32 drwav_init_file_w(drwav* pWav, const wchar_t* filename, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API drwav_bool32 drwav_init_file_ex_w(drwav* pWav, const wchar_t* filename, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API drwav_bool32 drwav_init_file_with_metadata(drwav* pWav, const char* filename, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API drwav_bool32 drwav_init_file_with_metadata_w(drwav* pWav, const wchar_t* filename, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks); /* Helper for initializing a wave file for writing using stdio. 
This holds the internal FILE object until drwav_uninit() is called. Keep this in mind if you're caching drwav objects because the operating system may restrict the number of file handles an application can have open at any given time. */ DRWAV_API drwav_bool32 drwav_init_file_write(drwav* pWav, const char* filename, const drwav_data_format* pFormat, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API drwav_bool32 drwav_init_file_write_sequential(drwav* pWav, const char* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API drwav_bool32 drwav_init_file_write_sequential_pcm_frames(drwav* pWav, const char* filename, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API drwav_bool32 drwav_init_file_write_w(drwav* pWav, const wchar_t* filename, const drwav_data_format* pFormat, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API drwav_bool32 drwav_init_file_write_sequential_w(drwav* pWav, const wchar_t* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API drwav_bool32 drwav_init_file_write_sequential_pcm_frames_w(drwav* pWav, const wchar_t* filename, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, const drwav_allocation_callbacks* pAllocationCallbacks); #endif /* DR_WAV_NO_STDIO */ /* Helper for initializing a loader from a pre-allocated memory buffer. This does not create a copy of the data. It is up to the application to ensure the buffer remains valid for the lifetime of the drwav object. The buffer should contain the contents of the entire wave file, not just the sample data. 
*/ DRWAV_API drwav_bool32 drwav_init_memory(drwav* pWav, const void* data, size_t dataSize, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API drwav_bool32 drwav_init_memory_ex(drwav* pWav, const void* data, size_t dataSize, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API drwav_bool32 drwav_init_memory_with_metadata(drwav* pWav, const void* data, size_t dataSize, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks); /* Helper for initializing a writer which outputs data to a memory buffer. dr_wav will manage the memory allocations, however it is up to the caller to free the data with drwav_free(). The buffer will remain allocated even after drwav_uninit() is called. The buffer should not be considered valid until after drwav_uninit() has been called. */ DRWAV_API drwav_bool32 drwav_init_memory_write(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API drwav_bool32 drwav_init_memory_write_sequential(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API drwav_bool32 drwav_init_memory_write_sequential_pcm_frames(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, const drwav_allocation_callbacks* pAllocationCallbacks); #ifndef DR_WAV_NO_CONVERSION_API /* Opens and reads an entire wav file in a single operation. The return value is a heap-allocated buffer containing the audio data. Use drwav_free() to free the buffer. 
*/ DRWAV_API drwav_int16* drwav_open_and_read_pcm_frames_s16(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API float* drwav_open_and_read_pcm_frames_f32(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API drwav_int32* drwav_open_and_read_pcm_frames_s32(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks); #ifndef DR_WAV_NO_STDIO /* Opens and decodes an entire wav file in a single operation. The return value is a heap-allocated buffer containing the audio data. Use drwav_free() to free the buffer. 
*/ DRWAV_API drwav_int16* drwav_open_file_and_read_pcm_frames_s16(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API float* drwav_open_file_and_read_pcm_frames_f32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API drwav_int32* drwav_open_file_and_read_pcm_frames_s32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API drwav_int16* drwav_open_file_and_read_pcm_frames_s16_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API float* drwav_open_file_and_read_pcm_frames_f32_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API drwav_int32* drwav_open_file_and_read_pcm_frames_s32_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks); #endif /* Opens and decodes an entire wav file from a block of memory in a single operation. The return value is a heap-allocated buffer containing the audio data. Use drwav_free() to free the buffer. 
*/ DRWAV_API drwav_int16* drwav_open_memory_and_read_pcm_frames_s16(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API float* drwav_open_memory_and_read_pcm_frames_f32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks); DRWAV_API drwav_int32* drwav_open_memory_and_read_pcm_frames_s32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks); #endif /* Frees data that was allocated internally by dr_wav. */ DRWAV_API void drwav_free(void* p, const drwav_allocation_callbacks* pAllocationCallbacks); /* Converts bytes from a wav stream to a sized type of native endian. */ DRWAV_API drwav_uint16 drwav_bytes_to_u16(const drwav_uint8* data); DRWAV_API drwav_int16 drwav_bytes_to_s16(const drwav_uint8* data); DRWAV_API drwav_uint32 drwav_bytes_to_u32(const drwav_uint8* data); DRWAV_API drwav_int32 drwav_bytes_to_s32(const drwav_uint8* data); DRWAV_API drwav_uint64 drwav_bytes_to_u64(const drwav_uint8* data); DRWAV_API drwav_int64 drwav_bytes_to_s64(const drwav_uint8* data); DRWAV_API float drwav_bytes_to_f32(const drwav_uint8* data); /* Compares a GUID for the purpose of checking the type of a Wave64 chunk. */ DRWAV_API drwav_bool32 drwav_guid_equal(const drwav_uint8 a[16], const drwav_uint8 b[16]); /* Compares a four-character-code for the purpose of checking the type of a RIFF chunk. 
*/
DRWAV_API drwav_bool32 drwav_fourcc_equal(const drwav_uint8* a, const char* b);

#ifdef __cplusplus
}
#endif
#endif  /* dr_wav_h */


/************************************************************************************************************************************************************
************************************************************************************************************************************************************

IMPLEMENTATION

************************************************************************************************************************************************************
************************************************************************************************************************************************************/
#if defined(DR_WAV_IMPLEMENTATION) || defined(DRWAV_IMPLEMENTATION)
#ifndef dr_wav_c
#define dr_wav_c

#ifdef __MRC__
/* MrC currently doesn't compile dr_wav correctly with any optimizations enabled. */
#pragma options opt off
#endif

#include <stdlib.h>
#include <string.h>
#include <limits.h> /* For INT_MAX */

#ifndef DR_WAV_NO_STDIO
#include <stdio.h>
#ifndef DR_WAV_NO_WCHAR
#include <wchar.h>
#endif
#endif

/* Standard library stuff. Each of these can be overridden by #define-ing the macro before the implementation is included. */
#ifndef DRWAV_ASSERT
#include <assert.h>
#define DRWAV_ASSERT(expression) assert(expression)
#endif
#ifndef DRWAV_MALLOC
#define DRWAV_MALLOC(sz) malloc((sz))
#endif
#ifndef DRWAV_REALLOC
#define DRWAV_REALLOC(p, sz) realloc((p), (sz))
#endif
#ifndef DRWAV_FREE
#define DRWAV_FREE(p) free((p))
#endif
#ifndef DRWAV_COPY_MEMORY
#define DRWAV_COPY_MEMORY(dst, src, sz) memcpy((dst), (src), (sz))
#endif
#ifndef DRWAV_ZERO_MEMORY
#define DRWAV_ZERO_MEMORY(p, sz) memset((p), 0, (sz))
#endif
#ifndef DRWAV_ZERO_OBJECT
#define DRWAV_ZERO_OBJECT(p) DRWAV_ZERO_MEMORY((p), sizeof(*p))
#endif

/* Small utility macros. Arguments are parenthesized; beware double evaluation in min/max/clamp. */
#define drwav_countof(x)                   (sizeof(x) / sizeof(x[0]))
#define drwav_align(x, a)                  ((((x) + (a) - 1) / (a)) * (a))
#define drwav_min(a, b)                    (((a) < (b)) ? (a) : (b))
#define drwav_max(a, b)                    (((a) > (b)) ? (a) : (b))
#define drwav_clamp(x, lo, hi)             (drwav_max((lo), drwav_min((hi), (x))))
#define drwav_offset_ptr(p, offset)        (((drwav_uint8*)(p)) + (offset))

#define DRWAV_MAX_SIMD_VECTOR_SIZE         32

/* Architecture Detection */
#if defined(__x86_64__) || defined(_M_X64)
#define DRWAV_X64
#elif defined(__i386) || defined(_M_IX86)
#define DRWAV_X86
#elif defined(__arm__) || defined(_M_ARM)
#define DRWAV_ARM
#endif
/* End Architecture Detection */

/* Inline */
#ifdef _MSC_VER
#define DRWAV_INLINE __forceinline
#elif defined(__GNUC__)
/*
I've had a bug report where GCC is emitting warnings about functions possibly not being inlineable. This warning happens when
the __attribute__((always_inline)) attribute is defined without an "inline" statement. I think therefore there must be some
case where "__inline__" is not always defined, thus the compiler emitting these warnings. When using -std=c89 or -ansi on the
command line, we cannot use the "inline" keyword and instead need to use "__inline__". In an attempt to work around this issue
I am using "__inline__" only when we're compiling in strict ANSI mode.
*/
#if defined(__STRICT_ANSI__)
#define DRWAV_GNUC_INLINE_HINT __inline__
#else
#define DRWAV_GNUC_INLINE_HINT inline
#endif
#if (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 2)) || defined(__clang__)
#define DRWAV_INLINE DRWAV_GNUC_INLINE_HINT __attribute__((always_inline))
#else
#define DRWAV_INLINE DRWAV_GNUC_INLINE_HINT
#endif
#elif defined(__WATCOMC__)
#define DRWAV_INLINE __inline
#else
#define DRWAV_INLINE
#endif
/* End Inline */

/* SIZE_MAX */
#if defined(SIZE_MAX)
#define DRWAV_SIZE_MAX SIZE_MAX
#else
#if defined(_WIN64) || defined(_LP64) || defined(__LP64__)
#define DRWAV_SIZE_MAX ((drwav_uint64)0xFFFFFFFFFFFFFFFF)
#else
#define DRWAV_SIZE_MAX 0xFFFFFFFF
#endif
#endif
/* End SIZE_MAX */

/* Weird bit manipulation is for C89 compatibility (no direct support for 64-bit integers).
*/
#define DRWAV_INT64_MIN ((drwav_int64) ((drwav_uint64)0x80000000 << 32))
#define DRWAV_INT64_MAX ((drwav_int64)(((drwav_uint64)0x7FFFFFFF << 32) | 0xFFFFFFFF))

/* Detect compiler-provided byte-swap intrinsics; used by drwav__bswap16/32/64 below. */
#if defined(_MSC_VER) && _MSC_VER >= 1400
#define DRWAV_HAS_BYTESWAP16_INTRINSIC
#define DRWAV_HAS_BYTESWAP32_INTRINSIC
#define DRWAV_HAS_BYTESWAP64_INTRINSIC
#elif defined(__clang__)
#if defined(__has_builtin)
#if __has_builtin(__builtin_bswap16)
#define DRWAV_HAS_BYTESWAP16_INTRINSIC
#endif
#if __has_builtin(__builtin_bswap32)
#define DRWAV_HAS_BYTESWAP32_INTRINSIC
#endif
#if __has_builtin(__builtin_bswap64)
#define DRWAV_HAS_BYTESWAP64_INTRINSIC
#endif
#endif
#elif defined(__GNUC__)
#if ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
#define DRWAV_HAS_BYTESWAP32_INTRINSIC
#define DRWAV_HAS_BYTESWAP64_INTRINSIC
#endif
#if ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))
#define DRWAV_HAS_BYTESWAP16_INTRINSIC
#endif
#endif

/* Writes the compiled-in library version into whichever out parameters are non-NULL. */
DRWAV_API void drwav_version(drwav_uint32* pMajor, drwav_uint32* pMinor, drwav_uint32* pRevision)
{
    if (pMajor) {
        *pMajor = DRWAV_VERSION_MAJOR;
    }
    if (pMinor) {
        *pMinor = DRWAV_VERSION_MINOR;
    }
    if (pRevision) {
        *pRevision = DRWAV_VERSION_REVISION;
    }
}

/* Returns the library version as a static "major.minor.revision" string. */
DRWAV_API const char* drwav_version_string(void)
{
    return DRWAV_VERSION_STRING;
}

/*
These limits are used for basic validation when initializing the decoder. If you exceed these limits, first of all: what on Earth are
you doing?! (Let me know, I'd be curious!) Second, you can adjust these by #define-ing them before the dr_wav implementation.
*/
#ifndef DRWAV_MAX_SAMPLE_RATE
#define DRWAV_MAX_SAMPLE_RATE 384000
#endif
#ifndef DRWAV_MAX_CHANNELS
#define DRWAV_MAX_CHANNELS 256
#endif
#ifndef DRWAV_MAX_BITS_PER_SAMPLE
#define DRWAV_MAX_BITS_PER_SAMPLE 64
#endif

/* Wave64 chunk identifier GUIDs. The first four bytes spell the corresponding RIFF fourcc. */
static const drwav_uint8 drwavGUID_W64_RIFF[16] = {0x72,0x69,0x66,0x66, 0x2E,0x91, 0xCF,0x11, 0xA5,0xD6, 0x28,0xDB,0x04,0xC1,0x00,0x00};    /* 66666972-912E-11CF-A5D6-28DB04C10000 */
static const drwav_uint8 drwavGUID_W64_WAVE[16] = {0x77,0x61,0x76,0x65, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A};    /* 65766177-ACF3-11D3-8CD1-00C04F8EDB8A */
/*static const drwav_uint8 drwavGUID_W64_JUNK[16] = {0x6A,0x75,0x6E,0x6B, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A};*/    /* 6B6E756A-ACF3-11D3-8CD1-00C04F8EDB8A */
static const drwav_uint8 drwavGUID_W64_FMT [16] = {0x66,0x6D,0x74,0x20, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A};    /* 20746D66-ACF3-11D3-8CD1-00C04F8EDB8A */
static const drwav_uint8 drwavGUID_W64_FACT[16] = {0x66,0x61,0x63,0x74, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A};    /* 74636166-ACF3-11D3-8CD1-00C04F8EDB8A */
static const drwav_uint8 drwavGUID_W64_DATA[16] = {0x64,0x61,0x74,0x61, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A};    /* 61746164-ACF3-11D3-8CD1-00C04F8EDB8A */
/*static const drwav_uint8 drwavGUID_W64_SMPL[16] = {0x73,0x6D,0x70,0x6C, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A};*/    /* 6C706D73-ACF3-11D3-8CD1-00C04F8EDB8A */

/* DRWAV_TRUE on a little-endian CPU. Resolved at compile time where possible, otherwise via a runtime probe of an int's first byte. */
static DRWAV_INLINE int drwav__is_little_endian(void)
{
#if defined(DRWAV_X86) || defined(DRWAV_X64)
    return DRWAV_TRUE;
#elif defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && __BYTE_ORDER == __LITTLE_ENDIAN
    return DRWAV_TRUE;
#else
    int n = 1;
    return (*(char*)&n) == 1;
#endif
}

/* Copies 16 raw bytes into a GUID buffer. */
static DRWAV_INLINE void drwav_bytes_to_guid(const drwav_uint8* data, drwav_uint8* guid)
{
    int i;
    for (i = 0; i < 16; ++i) {
        guid[i] = data[i];
    }
}

/* 16-bit byte swap: compiler intrinsic when available, otherwise portable shifts. */
static DRWAV_INLINE drwav_uint16 drwav__bswap16(drwav_uint16 n)
{
#ifdef DRWAV_HAS_BYTESWAP16_INTRINSIC
    #if defined(_MSC_VER)
        return _byteswap_ushort(n);
    #elif defined(__GNUC__) || defined(__clang__)
        return __builtin_bswap16(n);
    #else
        #error "This compiler does not support the byte swap intrinsic."
    #endif
#else
    return ((n & 0xFF00) >> 8) |
           ((n & 0x00FF) << 8);
#endif
}

/* 32-bit byte swap: intrinsic, hand-written ARM assembly, or portable shifts. */
static DRWAV_INLINE drwav_uint32 drwav__bswap32(drwav_uint32 n)
{
#ifdef DRWAV_HAS_BYTESWAP32_INTRINSIC
    #if defined(_MSC_VER)
        return _byteswap_ulong(n);
    #elif defined(__GNUC__) || defined(__clang__)
        #if defined(DRWAV_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 6) && !defined(DRWAV_64BIT)   /* <-- 64-bit inline assembly has not been tested, so disabling for now. */
            /* Inline assembly optimized implementation for ARM. In my testing, GCC does not generate optimized code with __builtin_bswap32(). */
            drwav_uint32 r;
            __asm__ __volatile__ (
            #if defined(DRWAV_64BIT)
                "rev %w[out], %w[in]" : [out]"=r"(r) : [in]"r"(n)   /* <-- This is untested. If someone in the community could test this, that would be appreciated! */
            #else
                "rev %[out], %[in]" : [out]"=r"(r) : [in]"r"(n)
            #endif
            );
            return r;
        #else
            return __builtin_bswap32(n);
        #endif
    #else
        #error "This compiler does not support the byte swap intrinsic."
    #endif
#else
    return ((n & 0xFF000000) >> 24) |
           ((n & 0x00FF0000) >>  8) |
           ((n & 0x0000FF00) <<  8) |
           ((n & 0x000000FF) << 24);
#endif
}

/* 64-bit byte swap: intrinsic when available, otherwise portable shifts. */
static DRWAV_INLINE drwav_uint64 drwav__bswap64(drwav_uint64 n)
{
#ifdef DRWAV_HAS_BYTESWAP64_INTRINSIC
    #if defined(_MSC_VER)
        return _byteswap_uint64(n);
    #elif defined(__GNUC__) || defined(__clang__)
        return __builtin_bswap64(n);
    #else
        #error "This compiler does not support the byte swap intrinsic."
    #endif
#else
    /* Weird "<< 32" bitshift is required for C89 because it doesn't support 64-bit constants. Should be optimized out by a good compiler.
*/
    return ((n & ((drwav_uint64)0xFF000000 << 32)) >> 56) |
           ((n & ((drwav_uint64)0x00FF0000 << 32)) >> 40) |
           ((n & ((drwav_uint64)0x0000FF00 << 32)) >> 24) |
           ((n & ((drwav_uint64)0x000000FF << 32)) >>  8) |
           ((n & ((drwav_uint64)0xFF000000      )) <<  8) |
           ((n & ((drwav_uint64)0x00FF0000      )) << 24) |
           ((n & ((drwav_uint64)0x0000FF00      )) << 40) |
           ((n & ((drwav_uint64)0x000000FF      )) << 56);
#endif
}

/* Signed wrapper around the 16-bit swap. */
static DRWAV_INLINE drwav_int16 drwav__bswap_s16(drwav_int16 n)
{
    return (drwav_int16)drwav__bswap16((drwav_uint16)n);
}

/* Byte-swaps sampleCount 16-bit samples in place. */
static DRWAV_INLINE void drwav__bswap_samples_s16(drwav_int16* pSamples, drwav_uint64 sampleCount)
{
    drwav_uint64 iSample;
    for (iSample = 0; iSample < sampleCount; iSample += 1) {
        pSamples[iSample] = drwav__bswap_s16(pSamples[iSample]);
    }
}

/* Reverses the three bytes of one packed 24-bit sample in place. */
static DRWAV_INLINE void drwav__bswap_s24(drwav_uint8* p)
{
    drwav_uint8 t;
    t = p[0];
    p[0] = p[2];
    p[2] = t;
}

/* Byte-swaps sampleCount packed 24-bit samples in place (3 bytes per sample). */
static DRWAV_INLINE void drwav__bswap_samples_s24(drwav_uint8* pSamples, drwav_uint64 sampleCount)
{
    drwav_uint64 iSample;
    for (iSample = 0; iSample < sampleCount; iSample += 1) {
        drwav_uint8* pSample = pSamples + (iSample*3);
        drwav__bswap_s24(pSample);
    }
}

/* Signed wrapper around the 32-bit swap. */
static DRWAV_INLINE drwav_int32 drwav__bswap_s32(drwav_int32 n)
{
    return (drwav_int32)drwav__bswap32((drwav_uint32)n);
}

/* Byte-swaps sampleCount 32-bit samples in place. */
static DRWAV_INLINE void drwav__bswap_samples_s32(drwav_int32* pSamples, drwav_uint64 sampleCount)
{
    drwav_uint64 iSample;
    for (iSample = 0; iSample < sampleCount; iSample += 1) {
        pSamples[iSample] = drwav__bswap_s32(pSamples[iSample]);
    }
}

/* Signed wrapper around the 64-bit swap. */
static DRWAV_INLINE drwav_int64 drwav__bswap_s64(drwav_int64 n)
{
    return (drwav_int64)drwav__bswap64((drwav_uint64)n);
}

/* Byte-swaps sampleCount 64-bit samples in place. */
static DRWAV_INLINE void drwav__bswap_samples_s64(drwav_int64* pSamples, drwav_uint64 sampleCount)
{
    drwav_uint64 iSample;
    for (iSample = 0; iSample < sampleCount; iSample += 1) {
        pSamples[iSample] = drwav__bswap_s64(pSamples[iSample]);
    }
}

/* Byte-swaps a float by punning its bits through a union (well-defined in C). */
static DRWAV_INLINE float drwav__bswap_f32(float n)
{
    union {
        drwav_uint32 i;
        float f;
    } x;
    x.f = n;
    x.i = drwav__bswap32(x.i);

    return x.f;
}

static
DRWAV_INLINE void drwav__bswap_samples_f32(float* pSamples, drwav_uint64 sampleCount)
{
    /* Byte-swaps sampleCount 32-bit float samples in place. */
    drwav_uint64 iSample;
    for (iSample = 0; iSample < sampleCount; iSample += 1) {
        pSamples[iSample] = drwav__bswap_f32(pSamples[iSample]);
    }
}

/* Byte-swaps sampleCount samples in place, dispatching on the sample size in bytes. Asserts on unsupported sizes. */
static DRWAV_INLINE void drwav__bswap_samples(void* pSamples, drwav_uint64 sampleCount, drwav_uint32 bytesPerSample)
{
    switch (bytesPerSample)
    {
        case 1:
        {
            /* No-op. */
        } break;
        case 2:
        {
            drwav__bswap_samples_s16((drwav_int16*)pSamples, sampleCount);
        } break;
        case 3:
        {
            drwav__bswap_samples_s24((drwav_uint8*)pSamples, sampleCount);
        } break;
        case 4:
        {
            drwav__bswap_samples_s32((drwav_int32*)pSamples, sampleCount);
        } break;
        case 8:
        {
            drwav__bswap_samples_s64((drwav_int64*)pSamples, sampleCount);
        } break;
        default:
        {
            /* Unsupported format. */
            DRWAV_ASSERT(DRWAV_FALSE);
        } break;
    }
}

/* DRWAV_TRUE when the container stores multi-byte values big-endian (RIFX and AIFF). */
DRWAV_PRIVATE DRWAV_INLINE drwav_bool32 drwav_is_container_be(drwav_container container)
{
    if (container == drwav_container_rifx || container == drwav_container_aiff) {
        return DRWAV_TRUE;
    } else {
        return DRWAV_FALSE;
    }
}

/* Reads a 16-bit unsigned integer from little-endian bytes. */
DRWAV_PRIVATE DRWAV_INLINE drwav_uint16 drwav_bytes_to_u16_le(const drwav_uint8* data)
{
    return ((drwav_uint16)data[0] << 0) | ((drwav_uint16)data[1] << 8);
}

/* Reads a 16-bit unsigned integer from big-endian bytes. */
DRWAV_PRIVATE DRWAV_INLINE drwav_uint16 drwav_bytes_to_u16_be(const drwav_uint8* data)
{
    return ((drwav_uint16)data[1] << 0) | ((drwav_uint16)data[0] << 8);
}

/* Reads a 16-bit unsigned integer using the byte order implied by the container. */
DRWAV_PRIVATE DRWAV_INLINE drwav_uint16 drwav_bytes_to_u16_ex(const drwav_uint8* data, drwav_container container)
{
    if (drwav_is_container_be(container)) {
        return drwav_bytes_to_u16_be(data);
    } else {
        return drwav_bytes_to_u16_le(data);
    }
}

/* Reads a 32-bit unsigned integer from little-endian bytes. */
DRWAV_PRIVATE DRWAV_INLINE drwav_uint32 drwav_bytes_to_u32_le(const drwav_uint8* data)
{
    return ((drwav_uint32)data[0] << 0) | ((drwav_uint32)data[1] << 8) | ((drwav_uint32)data[2] << 16) | ((drwav_uint32)data[3] << 24);
}

/* Reads a 32-bit unsigned integer from big-endian bytes. */
DRWAV_PRIVATE DRWAV_INLINE drwav_uint32 drwav_bytes_to_u32_be(const drwav_uint8* data)
{
    return ((drwav_uint32)data[3] << 0) | ((drwav_uint32)data[2] << 8) | ((drwav_uint32)data[1] <<
16) | ((drwav_uint32)data[0] << 24);
}

/* Reads a 32-bit unsigned integer using the byte order implied by the container. */
DRWAV_PRIVATE DRWAV_INLINE drwav_uint32 drwav_bytes_to_u32_ex(const drwav_uint8* data, drwav_container container)
{
    if (drwav_is_container_be(container)) {
        return drwav_bytes_to_u32_be(data);
    } else {
        return drwav_bytes_to_u32_le(data);
    }
}

/*
Converts a big-endian 80-bit IEEE 754 extended-precision value (the AIFF "extended" sample-rate field) to a signed 64-bit
integer: 1 sign bit, 15 exponent bits (bias 16383), 64 significand bits with an explicit integer bit. Saturates to
DRWAV_INT64_MIN/MAX on infinity or overflow; values below 1 round down to 0.
*/
DRWAV_PRIVATE drwav_int64 drwav_aiff_extented_to_s64(const drwav_uint8* data)
{
    drwav_uint32 exponent = ((drwav_uint32)data[0] << 8) | data[1];
    drwav_uint64 hi = ((drwav_uint64)data[2] << 24) | ((drwav_uint64)data[3] << 16) | ((drwav_uint64)data[4] << 8) | ((drwav_uint64)data[5] << 0);
    drwav_uint64 lo = ((drwav_uint64)data[6] << 24) | ((drwav_uint64)data[7] << 16) | ((drwav_uint64)data[8] << 8) | ((drwav_uint64)data[9] << 0);
    drwav_uint64 significand = (hi << 32) | lo;
    int sign = exponent >> 15;

    /* Remove sign bit. */
    exponent &= 0x7FFF;

    /* Special cases. */
    if (exponent == 0 && significand == 0) {
        return 0;
    } else if (exponent == 0x7FFF) {
        return sign ? DRWAV_INT64_MIN : DRWAV_INT64_MAX;    /* Infinite. */
    }

    exponent -= 16383;
    if (exponent > 63) {
        return sign ? DRWAV_INT64_MIN : DRWAV_INT64_MAX;    /* Too big for a 64-bit integer. */
    } else if (exponent < 1) {
        return 0;   /* Number is less than 1, so rounds down to 0. */
    }

    significand >>= (63 - exponent);

    if (sign) {
        return -(drwav_int64)significand;
    } else {
        return (drwav_int64)significand;
    }
}

/* Default allocator callbacks, used when the caller supplies no drwav_allocation_callbacks. */
DRWAV_PRIVATE void* drwav__malloc_default(size_t sz, void* pUserData)
{
    (void)pUserData;
    return DRWAV_MALLOC(sz);
}

DRWAV_PRIVATE void* drwav__realloc_default(void* p, size_t sz, void* pUserData)
{
    (void)pUserData;
    return DRWAV_REALLOC(p, sz);
}

DRWAV_PRIVATE void drwav__free_default(void* p, void* pUserData)
{
    (void)pUserData;
    DRWAV_FREE(p);
}

/* malloc() via the user-supplied callbacks. Falls back to onRealloc(NULL, ...) when onMalloc is not provided. */
DRWAV_PRIVATE void* drwav__malloc_from_callbacks(size_t sz, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    if (pAllocationCallbacks == NULL) {
        return NULL;
    }

    if (pAllocationCallbacks->onMalloc != NULL) {
        return pAllocationCallbacks->onMalloc(sz, pAllocationCallbacks->pUserData);
    }

    /* Try using realloc().
*/
    if (pAllocationCallbacks->onRealloc != NULL) {
        return pAllocationCallbacks->onRealloc(NULL, sz, pAllocationCallbacks->pUserData);
    }

    return NULL;
}

/* realloc() via the user-supplied callbacks. Falls back to malloc+copy(szOld bytes)+free when only those are provided. */
DRWAV_PRIVATE void* drwav__realloc_from_callbacks(void* p, size_t szNew, size_t szOld, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    if (pAllocationCallbacks == NULL) {
        return NULL;
    }

    if (pAllocationCallbacks->onRealloc != NULL) {
        return pAllocationCallbacks->onRealloc(p, szNew, pAllocationCallbacks->pUserData);
    }

    /* Try emulating realloc() in terms of malloc()/free(). */
    if (pAllocationCallbacks->onMalloc != NULL && pAllocationCallbacks->onFree != NULL) {
        void* p2;

        p2 = pAllocationCallbacks->onMalloc(szNew, pAllocationCallbacks->pUserData);
        if (p2 == NULL) {
            return NULL;
        }

        if (p != NULL) {
            DRWAV_COPY_MEMORY(p2, p, szOld);
            pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData);
        }

        return p2;
    }

    return NULL;
}

/* free() via the user-supplied callbacks. Safe no-op for NULL pointers or missing onFree. */
DRWAV_PRIVATE void drwav__free_from_callbacks(void* p, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    if (p == NULL || pAllocationCallbacks == NULL) {
        return;
    }

    if (pAllocationCallbacks->onFree != NULL) {
        pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData);
    }
}

/* Returns a by-value copy of the caller's callbacks, or the malloc/realloc/free defaults when NULL is passed. */
DRWAV_PRIVATE drwav_allocation_callbacks drwav_copy_allocation_callbacks_or_defaults(const drwav_allocation_callbacks* pAllocationCallbacks)
{
    if (pAllocationCallbacks != NULL) {
        /* Copy. */
        return *pAllocationCallbacks;
    } else {
        /* Defaults.
*/ drwav_allocation_callbacks allocationCallbacks; allocationCallbacks.pUserData = NULL; allocationCallbacks.onMalloc = drwav__malloc_default; allocationCallbacks.onRealloc = drwav__realloc_default; allocationCallbacks.onFree = drwav__free_default; return allocationCallbacks; } } static DRWAV_INLINE drwav_bool32 drwav__is_compressed_format_tag(drwav_uint16 formatTag) { return formatTag == DR_WAVE_FORMAT_ADPCM || formatTag == DR_WAVE_FORMAT_DVI_ADPCM; } DRWAV_PRIVATE unsigned int drwav__chunk_padding_size_riff(drwav_uint64 chunkSize) { return (unsigned int)(chunkSize % 2); } DRWAV_PRIVATE unsigned int drwav__chunk_padding_size_w64(drwav_uint64 chunkSize) { return (unsigned int)(chunkSize % 8); } DRWAV_PRIVATE drwav_uint64 drwav_read_pcm_frames_s16__msadpcm(drwav* pWav, drwav_uint64 samplesToRead, drwav_int16* pBufferOut); DRWAV_PRIVATE drwav_uint64 drwav_read_pcm_frames_s16__ima(drwav* pWav, drwav_uint64 samplesToRead, drwav_int16* pBufferOut); DRWAV_PRIVATE drwav_bool32 drwav_init_write__internal(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount); DRWAV_PRIVATE drwav_result drwav__read_chunk_header(drwav_read_proc onRead, void* pUserData, drwav_container container, drwav_uint64* pRunningBytesReadOut, drwav_chunk_header* pHeaderOut) { if (container == drwav_container_riff || container == drwav_container_rifx || container == drwav_container_rf64 || container == drwav_container_aiff) { drwav_uint8 sizeInBytes[4]; if (onRead(pUserData, pHeaderOut->id.fourcc, 4) != 4) { return DRWAV_AT_END; } if (onRead(pUserData, sizeInBytes, 4) != 4) { return DRWAV_INVALID_FILE; } pHeaderOut->sizeInBytes = drwav_bytes_to_u32_ex(sizeInBytes, container); pHeaderOut->paddingSize = drwav__chunk_padding_size_riff(pHeaderOut->sizeInBytes); *pRunningBytesReadOut += 8; } else if (container == drwav_container_w64) { drwav_uint8 sizeInBytes[8]; if (onRead(pUserData, pHeaderOut->id.guid, 16) != 16) { return DRWAV_AT_END; } if (onRead(pUserData, sizeInBytes, 8) != 8) { 
return DRWAV_INVALID_FILE; } pHeaderOut->sizeInBytes = drwav_bytes_to_u64(sizeInBytes) - 24; /* <-- Subtract 24 because w64 includes the size of the header. */ pHeaderOut->paddingSize = drwav__chunk_padding_size_w64(pHeaderOut->sizeInBytes); *pRunningBytesReadOut += 24; } else { return DRWAV_INVALID_FILE; } return DRWAV_SUCCESS; } DRWAV_PRIVATE drwav_bool32 drwav__seek_forward(drwav_seek_proc onSeek, drwav_uint64 offset, void* pUserData) { drwav_uint64 bytesRemainingToSeek = offset; while (bytesRemainingToSeek > 0) { if (bytesRemainingToSeek > 0x7FFFFFFF) { if (!onSeek(pUserData, 0x7FFFFFFF, drwav_seek_origin_current)) { return DRWAV_FALSE; } bytesRemainingToSeek -= 0x7FFFFFFF; } else { if (!onSeek(pUserData, (int)bytesRemainingToSeek, drwav_seek_origin_current)) { return DRWAV_FALSE; } bytesRemainingToSeek = 0; } } return DRWAV_TRUE; } DRWAV_PRIVATE drwav_bool32 drwav__seek_from_start(drwav_seek_proc onSeek, drwav_uint64 offset, void* pUserData) { if (offset <= 0x7FFFFFFF) { return onSeek(pUserData, (int)offset, drwav_seek_origin_start); } /* Larger than 32-bit seek. */ if (!onSeek(pUserData, 0x7FFFFFFF, drwav_seek_origin_start)) { return DRWAV_FALSE; } offset -= 0x7FFFFFFF; for (;;) { if (offset <= 0x7FFFFFFF) { return onSeek(pUserData, (int)offset, drwav_seek_origin_current); } if (!onSeek(pUserData, 0x7FFFFFFF, drwav_seek_origin_current)) { return DRWAV_FALSE; } offset -= 0x7FFFFFFF; } /* Should never get here. 
*/
    /*return DRWAV_TRUE; */
}

/* Reads bytesToRead bytes via the user's read callback and advances *pCursor by the number of
   bytes actually read (which may be short at end of stream). Both callback and cursor must be set. */
DRWAV_PRIVATE size_t drwav__on_read(drwav_read_proc onRead, void* pUserData, void* pBufferOut, size_t bytesToRead, drwav_uint64* pCursor)
{
    size_t bytesRead;

    DRWAV_ASSERT(onRead != NULL);
    DRWAV_ASSERT(pCursor != NULL);

    bytesRead = onRead(pUserData, pBufferOut, bytesToRead);
    *pCursor += bytesRead;
    return bytesRead;
}

#if 0
DRWAV_PRIVATE drwav_bool32 drwav__on_seek(drwav_seek_proc onSeek, void* pUserData, int offset, drwav_seek_origin origin, drwav_uint64* pCursor)
{
    DRWAV_ASSERT(onSeek != NULL);
    DRWAV_ASSERT(pCursor != NULL);

    if (!onSeek(pUserData, offset, origin)) {
        return DRWAV_FALSE;
    }

    if (origin == drwav_seek_origin_start) {
        *pCursor = offset;
    } else {
        *pCursor += offset;
    }

    return DRWAV_TRUE;
}
#endif

/* Fixed-layout sizes (in bytes) of the metadata chunk headers/records parsed below. */
#define DRWAV_SMPL_BYTES                    36
#define DRWAV_SMPL_LOOP_BYTES               24
#define DRWAV_INST_BYTES                    7
#define DRWAV_ACID_BYTES                    24
#define DRWAV_CUE_BYTES                     4
#define DRWAV_BEXT_BYTES                    602
#define DRWAV_BEXT_DESCRIPTION_BYTES        256
#define DRWAV_BEXT_ORIGINATOR_NAME_BYTES    32
#define DRWAV_BEXT_ORIGINATOR_REF_BYTES     32
#define DRWAV_BEXT_RESERVED_BYTES           180
#define DRWAV_BEXT_UMID_BYTES               64
#define DRWAV_CUE_POINT_BYTES               24
#define DRWAV_LIST_LABEL_OR_NOTE_BYTES      4
#define DRWAV_LIST_LABELLED_TEXT_BYTES      20

#define DRWAV_METADATA_ALIGNMENT            8

/* Metadata is parsed in two passes: a counting pass that sizes the allocation, then a read pass
   that fills the drwav_metadata objects in place. */
typedef enum
{
    drwav__metadata_parser_stage_count,
    drwav__metadata_parser_stage_read
} drwav__metadata_parser_stage;

typedef struct
{
    drwav_read_proc onRead;                 /* Stream read callback. */
    drwav_seek_proc onSeek;                 /* Stream seek callback. */
    void *pReadSeekUserData;                /* User data for both callbacks. */
    drwav__metadata_parser_stage stage;     /* Current pass: count or read. */
    drwav_metadata *pMetadata;              /* Output array, carved from pData. */
    drwav_uint32 metadataCount;             /* Number of metadata objects counted/filled. */
    drwav_uint8 *pData;                     /* Single heap block holding the metadata array plus all variable-length payloads. */
    drwav_uint8 *pDataCursor;               /* Bump-allocation cursor into pData. */
    drwav_uint64 metadataCursor;            /* Index of the next metadata object to fill during the read pass. */
    drwav_uint64 extraCapacity;             /* Extra bytes (strings, loops, cue points, ...) requested during the count pass. */
} drwav__metadata_parser;

/* Total bytes needed for the metadata array plus all requested extra payload space.
   Returns 0 if the total would overflow size_t. */
DRWAV_PRIVATE size_t drwav__metadata_memory_capacity(drwav__metadata_parser* pParser)
{
    drwav_uint64 cap = sizeof(drwav_metadata) * (drwav_uint64)pParser->metadataCount + pParser->extraCapacity;
    if (cap > DRWAV_SIZE_MAX) {
        return 0;   /* Too big.
*/ } return (size_t)cap; /* Safe cast thanks to the check above. */ } DRWAV_PRIVATE drwav_uint8* drwav__metadata_get_memory(drwav__metadata_parser* pParser, size_t size, size_t align) { drwav_uint8* pResult; if (align) { drwav_uintptr modulo = (drwav_uintptr)pParser->pDataCursor % align; if (modulo != 0) { pParser->pDataCursor += align - modulo; } } pResult = pParser->pDataCursor; /* Getting to the point where this function is called means there should always be memory available. Out of memory checks should have been done at an earlier stage. */ DRWAV_ASSERT((pResult + size) <= (pParser->pData + drwav__metadata_memory_capacity(pParser))); pParser->pDataCursor += size; return pResult; } DRWAV_PRIVATE void drwav__metadata_request_extra_memory_for_stage_2(drwav__metadata_parser* pParser, size_t bytes, size_t align) { size_t extra = bytes + (align ? (align - 1) : 0); pParser->extraCapacity += extra; } DRWAV_PRIVATE drwav_result drwav__metadata_alloc(drwav__metadata_parser* pParser, drwav_allocation_callbacks* pAllocationCallbacks) { if (pParser->extraCapacity != 0 || pParser->metadataCount != 0) { pAllocationCallbacks->onFree(pParser->pData, pAllocationCallbacks->pUserData); pParser->pData = (drwav_uint8*)pAllocationCallbacks->onMalloc(drwav__metadata_memory_capacity(pParser), pAllocationCallbacks->pUserData); pParser->pDataCursor = pParser->pData; if (pParser->pData == NULL) { return DRWAV_OUT_OF_MEMORY; } /* We don't need to worry about specifying an alignment here because malloc always returns something of suitable alignment. This also means pParser->pMetadata is all that we need to store in order for us to free when we are done. 
*/ pParser->pMetadata = (drwav_metadata*)drwav__metadata_get_memory(pParser, sizeof(drwav_metadata) * pParser->metadataCount, 1); pParser->metadataCursor = 0; } return DRWAV_SUCCESS; } DRWAV_PRIVATE size_t drwav__metadata_parser_read(drwav__metadata_parser* pParser, void* pBufferOut, size_t bytesToRead, drwav_uint64* pCursor) { if (pCursor != NULL) { return drwav__on_read(pParser->onRead, pParser->pReadSeekUserData, pBufferOut, bytesToRead, pCursor); } else { return pParser->onRead(pParser->pReadSeekUserData, pBufferOut, bytesToRead); } } DRWAV_PRIVATE drwav_uint64 drwav__read_smpl_to_metadata_obj(drwav__metadata_parser* pParser, const drwav_chunk_header* pChunkHeader, drwav_metadata* pMetadata) { drwav_uint8 smplHeaderData[DRWAV_SMPL_BYTES]; drwav_uint64 totalBytesRead = 0; size_t bytesJustRead; if (pMetadata == NULL) { return 0; } bytesJustRead = drwav__metadata_parser_read(pParser, smplHeaderData, sizeof(smplHeaderData), &totalBytesRead); DRWAV_ASSERT(pParser->stage == drwav__metadata_parser_stage_read); DRWAV_ASSERT(pChunkHeader != NULL); if (pMetadata != NULL && bytesJustRead == sizeof(smplHeaderData)) { drwav_uint32 iSampleLoop; pMetadata->type = drwav_metadata_type_smpl; pMetadata->data.smpl.manufacturerId = drwav_bytes_to_u32(smplHeaderData + 0); pMetadata->data.smpl.productId = drwav_bytes_to_u32(smplHeaderData + 4); pMetadata->data.smpl.samplePeriodNanoseconds = drwav_bytes_to_u32(smplHeaderData + 8); pMetadata->data.smpl.midiUnityNote = drwav_bytes_to_u32(smplHeaderData + 12); pMetadata->data.smpl.midiPitchFraction = drwav_bytes_to_u32(smplHeaderData + 16); pMetadata->data.smpl.smpteFormat = drwav_bytes_to_u32(smplHeaderData + 20); pMetadata->data.smpl.smpteOffset = drwav_bytes_to_u32(smplHeaderData + 24); pMetadata->data.smpl.sampleLoopCount = drwav_bytes_to_u32(smplHeaderData + 28); pMetadata->data.smpl.samplerSpecificDataSizeInBytes = drwav_bytes_to_u32(smplHeaderData + 32); /* The loop count needs to be validated against the size of the chunk for 
safety so we don't attempt to read over the boundary of the chunk. */ if (pMetadata->data.smpl.sampleLoopCount == (pChunkHeader->sizeInBytes - DRWAV_SMPL_BYTES) / DRWAV_SMPL_LOOP_BYTES) { pMetadata->data.smpl.pLoops = (drwav_smpl_loop*)drwav__metadata_get_memory(pParser, sizeof(drwav_smpl_loop) * pMetadata->data.smpl.sampleLoopCount, DRWAV_METADATA_ALIGNMENT); for (iSampleLoop = 0; iSampleLoop < pMetadata->data.smpl.sampleLoopCount; ++iSampleLoop) { drwav_uint8 smplLoopData[DRWAV_SMPL_LOOP_BYTES]; bytesJustRead = drwav__metadata_parser_read(pParser, smplLoopData, sizeof(smplLoopData), &totalBytesRead); if (bytesJustRead == sizeof(smplLoopData)) { pMetadata->data.smpl.pLoops[iSampleLoop].cuePointId = drwav_bytes_to_u32(smplLoopData + 0); pMetadata->data.smpl.pLoops[iSampleLoop].type = drwav_bytes_to_u32(smplLoopData + 4); pMetadata->data.smpl.pLoops[iSampleLoop].firstSampleByteOffset = drwav_bytes_to_u32(smplLoopData + 8); pMetadata->data.smpl.pLoops[iSampleLoop].lastSampleByteOffset = drwav_bytes_to_u32(smplLoopData + 12); pMetadata->data.smpl.pLoops[iSampleLoop].sampleFraction = drwav_bytes_to_u32(smplLoopData + 16); pMetadata->data.smpl.pLoops[iSampleLoop].playCount = drwav_bytes_to_u32(smplLoopData + 20); } else { break; } } if (pMetadata->data.smpl.samplerSpecificDataSizeInBytes > 0) { pMetadata->data.smpl.pSamplerSpecificData = drwav__metadata_get_memory(pParser, pMetadata->data.smpl.samplerSpecificDataSizeInBytes, 1); DRWAV_ASSERT(pMetadata->data.smpl.pSamplerSpecificData != NULL); drwav__metadata_parser_read(pParser, pMetadata->data.smpl.pSamplerSpecificData, pMetadata->data.smpl.samplerSpecificDataSizeInBytes, &totalBytesRead); } } } return totalBytesRead; } DRWAV_PRIVATE drwav_uint64 drwav__read_cue_to_metadata_obj(drwav__metadata_parser* pParser, const drwav_chunk_header* pChunkHeader, drwav_metadata* pMetadata) { drwav_uint8 cueHeaderSectionData[DRWAV_CUE_BYTES]; drwav_uint64 totalBytesRead = 0; size_t bytesJustRead; if (pMetadata == NULL) { return 0; } 
bytesJustRead = drwav__metadata_parser_read(pParser, cueHeaderSectionData, sizeof(cueHeaderSectionData), &totalBytesRead); DRWAV_ASSERT(pParser->stage == drwav__metadata_parser_stage_read); if (bytesJustRead == sizeof(cueHeaderSectionData)) { pMetadata->type = drwav_metadata_type_cue; pMetadata->data.cue.cuePointCount = drwav_bytes_to_u32(cueHeaderSectionData); /* We need to validate the cue point count against the size of the chunk so we don't read beyond the chunk. */ if (pMetadata->data.cue.cuePointCount == (pChunkHeader->sizeInBytes - DRWAV_CUE_BYTES) / DRWAV_CUE_POINT_BYTES) { pMetadata->data.cue.pCuePoints = (drwav_cue_point*)drwav__metadata_get_memory(pParser, sizeof(drwav_cue_point) * pMetadata->data.cue.cuePointCount, DRWAV_METADATA_ALIGNMENT); DRWAV_ASSERT(pMetadata->data.cue.pCuePoints != NULL); if (pMetadata->data.cue.cuePointCount > 0) { drwav_uint32 iCuePoint; for (iCuePoint = 0; iCuePoint < pMetadata->data.cue.cuePointCount; ++iCuePoint) { drwav_uint8 cuePointData[DRWAV_CUE_POINT_BYTES]; bytesJustRead = drwav__metadata_parser_read(pParser, cuePointData, sizeof(cuePointData), &totalBytesRead); if (bytesJustRead == sizeof(cuePointData)) { pMetadata->data.cue.pCuePoints[iCuePoint].id = drwav_bytes_to_u32(cuePointData + 0); pMetadata->data.cue.pCuePoints[iCuePoint].playOrderPosition = drwav_bytes_to_u32(cuePointData + 4); pMetadata->data.cue.pCuePoints[iCuePoint].dataChunkId[0] = cuePointData[8]; pMetadata->data.cue.pCuePoints[iCuePoint].dataChunkId[1] = cuePointData[9]; pMetadata->data.cue.pCuePoints[iCuePoint].dataChunkId[2] = cuePointData[10]; pMetadata->data.cue.pCuePoints[iCuePoint].dataChunkId[3] = cuePointData[11]; pMetadata->data.cue.pCuePoints[iCuePoint].chunkStart = drwav_bytes_to_u32(cuePointData + 12); pMetadata->data.cue.pCuePoints[iCuePoint].blockStart = drwav_bytes_to_u32(cuePointData + 16); pMetadata->data.cue.pCuePoints[iCuePoint].sampleByteOffset = drwav_bytes_to_u32(cuePointData + 20); } else { break; } } } } } return totalBytesRead; } 
/* Reads a fixed-size "inst" (instrument) chunk into pMetadata. All seven fields are signed bytes.
   Returns the number of bytes consumed from the stream (0 if pMetadata is NULL). */
DRWAV_PRIVATE drwav_uint64 drwav__read_inst_to_metadata_obj(drwav__metadata_parser* pParser, drwav_metadata* pMetadata)
{
    drwav_uint8 instData[DRWAV_INST_BYTES];
    drwav_uint64 bytesRead;

    if (pMetadata == NULL) {
        return 0;
    }

    bytesRead = drwav__metadata_parser_read(pParser, instData, sizeof(instData), NULL);

    DRWAV_ASSERT(pParser->stage == drwav__metadata_parser_stage_read);

    if (bytesRead == sizeof(instData)) {
        pMetadata->type                    = drwav_metadata_type_inst;
        pMetadata->data.inst.midiUnityNote = (drwav_int8)instData[0];
        pMetadata->data.inst.fineTuneCents = (drwav_int8)instData[1];
        pMetadata->data.inst.gainDecibels  = (drwav_int8)instData[2];
        pMetadata->data.inst.lowNote       = (drwav_int8)instData[3];
        pMetadata->data.inst.highNote      = (drwav_int8)instData[4];
        pMetadata->data.inst.lowVelocity   = (drwav_int8)instData[5];
        pMetadata->data.inst.highVelocity  = (drwav_int8)instData[6];
    }

    return bytesRead;
}

/* Reads a fixed-size "acid" chunk (ACIDized loop info: tempo, meter, beat count) into pMetadata.
   Returns the number of bytes consumed from the stream (0 if pMetadata is NULL). */
DRWAV_PRIVATE drwav_uint64 drwav__read_acid_to_metadata_obj(drwav__metadata_parser* pParser, drwav_metadata* pMetadata)
{
    drwav_uint8 acidData[DRWAV_ACID_BYTES];
    drwav_uint64 bytesRead;

    if (pMetadata == NULL) {
        return 0;
    }

    bytesRead = drwav__metadata_parser_read(pParser, acidData, sizeof(acidData), NULL);

    DRWAV_ASSERT(pParser->stage == drwav__metadata_parser_stage_read);

    if (bytesRead == sizeof(acidData)) {
        pMetadata->type                       = drwav_metadata_type_acid;
        pMetadata->data.acid.flags            = drwav_bytes_to_u32(acidData + 0);
        pMetadata->data.acid.midiUnityNote    = drwav_bytes_to_u16(acidData + 4);
        pMetadata->data.acid.reserved1        = drwav_bytes_to_u16(acidData + 6);
        pMetadata->data.acid.reserved2        = drwav_bytes_to_f32(acidData + 8);
        pMetadata->data.acid.numBeats         = drwav_bytes_to_u32(acidData + 12);
        pMetadata->data.acid.meterDenominator = drwav_bytes_to_u16(acidData + 16);
        pMetadata->data.acid.meterNumerator   = drwav_bytes_to_u16(acidData + 18);
        pMetadata->data.acid.tempo            = drwav_bytes_to_f32(acidData + 20);
    }

    return bytesRead;
}

/* strlen() replacement so the library does not depend on the CRT. */
DRWAV_PRIVATE size_t drwav__strlen(const char* str)
{
    size_t result = 0;

    while
(*str++) { result += 1; } return result; } DRWAV_PRIVATE size_t drwav__strlen_clamped(const char* str, size_t maxToRead) { size_t result = 0; while (*str++ && result < maxToRead) { result += 1; } return result; } DRWAV_PRIVATE char* drwav__metadata_copy_string(drwav__metadata_parser* pParser, const char* str, size_t maxToRead) { size_t len = drwav__strlen_clamped(str, maxToRead); if (len) { char* result = (char*)drwav__metadata_get_memory(pParser, len + 1, 1); DRWAV_ASSERT(result != NULL); DRWAV_COPY_MEMORY(result, str, len); result[len] = '\0'; return result; } else { return NULL; } } typedef struct { const void* pBuffer; size_t sizeInBytes; size_t cursor; } drwav_buffer_reader; DRWAV_PRIVATE drwav_result drwav_buffer_reader_init(const void* pBuffer, size_t sizeInBytes, drwav_buffer_reader* pReader) { DRWAV_ASSERT(pBuffer != NULL); DRWAV_ASSERT(pReader != NULL); DRWAV_ZERO_OBJECT(pReader); pReader->pBuffer = pBuffer; pReader->sizeInBytes = sizeInBytes; pReader->cursor = 0; return DRWAV_SUCCESS; } DRWAV_PRIVATE const void* drwav_buffer_reader_ptr(const drwav_buffer_reader* pReader) { DRWAV_ASSERT(pReader != NULL); return drwav_offset_ptr(pReader->pBuffer, pReader->cursor); } DRWAV_PRIVATE drwav_result drwav_buffer_reader_seek(drwav_buffer_reader* pReader, size_t bytesToSeek) { DRWAV_ASSERT(pReader != NULL); if (pReader->cursor + bytesToSeek > pReader->sizeInBytes) { return DRWAV_BAD_SEEK; /* Seeking too far forward. */ } pReader->cursor += bytesToSeek; return DRWAV_SUCCESS; } DRWAV_PRIVATE drwav_result drwav_buffer_reader_read(drwav_buffer_reader* pReader, void* pDst, size_t bytesToRead, size_t* pBytesRead) { drwav_result result = DRWAV_SUCCESS; size_t bytesRemaining; DRWAV_ASSERT(pReader != NULL); if (pBytesRead != NULL) { *pBytesRead = 0; } bytesRemaining = (pReader->sizeInBytes - pReader->cursor); if (bytesToRead > bytesRemaining) { bytesToRead = bytesRemaining; } if (pDst == NULL) { /* Seek. 
*/
        result = drwav_buffer_reader_seek(pReader, bytesToRead);
    } else {
        /* Read. */
        DRWAV_COPY_MEMORY(pDst, drwav_buffer_reader_ptr(pReader), bytesToRead);
        pReader->cursor += bytesToRead;
    }

    DRWAV_ASSERT(pReader->cursor <= pReader->sizeInBytes);

    if (result == DRWAV_SUCCESS) {
        if (pBytesRead != NULL) {
            *pBytesRead = bytesToRead;
        }
    }

    /* NOTE(review): DRWAV_SUCCESS is returned even if the seek path above set a failure result;
       *pBytesRead is only written on success, so callers checking the byte count still behave.
       bytesToRead is clamped to the bytes remaining, so the seek cannot actually overrun —
       confirm against upstream dr_wav before changing. */
    return DRWAV_SUCCESS;
}

/* Reads a little-endian u16 from the buffer. *pDst is zeroed up front so it has a defined value
   on failure; propagates the underlying read result on error or short read. */
DRWAV_PRIVATE drwav_result drwav_buffer_reader_read_u16(drwav_buffer_reader* pReader, drwav_uint16* pDst)
{
    drwav_result result;
    size_t bytesRead;
    drwav_uint8 data[2];

    DRWAV_ASSERT(pReader != NULL);
    DRWAV_ASSERT(pDst != NULL);

    *pDst = 0;  /* Safety. */

    result = drwav_buffer_reader_read(pReader, data, sizeof(*pDst), &bytesRead);
    if (result != DRWAV_SUCCESS || bytesRead != sizeof(*pDst)) {
        return result;
    }

    *pDst = drwav_bytes_to_u16(data);

    return DRWAV_SUCCESS;
}

/* Reads a little-endian u32 from the buffer. Same contract as drwav_buffer_reader_read_u16. */
DRWAV_PRIVATE drwav_result drwav_buffer_reader_read_u32(drwav_buffer_reader* pReader, drwav_uint32* pDst)
{
    drwav_result result;
    size_t bytesRead;
    drwav_uint8 data[4];

    DRWAV_ASSERT(pReader != NULL);
    DRWAV_ASSERT(pDst != NULL);

    *pDst = 0;  /* Safety.
*/ result = drwav_buffer_reader_read(pReader, data, sizeof(*pDst), &bytesRead); if (result != DRWAV_SUCCESS || bytesRead != sizeof(*pDst)) { return result; } *pDst = drwav_bytes_to_u32(data); return DRWAV_SUCCESS; } DRWAV_PRIVATE drwav_uint64 drwav__read_bext_to_metadata_obj(drwav__metadata_parser* pParser, drwav_metadata* pMetadata, drwav_uint64 chunkSize) { drwav_uint8 bextData[DRWAV_BEXT_BYTES]; size_t bytesRead = drwav__metadata_parser_read(pParser, bextData, sizeof(bextData), NULL); DRWAV_ASSERT(pParser->stage == drwav__metadata_parser_stage_read); if (bytesRead == sizeof(bextData)) { drwav_buffer_reader reader; drwav_uint32 timeReferenceLow; drwav_uint32 timeReferenceHigh; size_t extraBytes; pMetadata->type = drwav_metadata_type_bext; if (drwav_buffer_reader_init(bextData, bytesRead, &reader) == DRWAV_SUCCESS) { pMetadata->data.bext.pDescription = drwav__metadata_copy_string(pParser, (const char*)drwav_buffer_reader_ptr(&reader), DRWAV_BEXT_DESCRIPTION_BYTES); drwav_buffer_reader_seek(&reader, DRWAV_BEXT_DESCRIPTION_BYTES); pMetadata->data.bext.pOriginatorName = drwav__metadata_copy_string(pParser, (const char*)drwav_buffer_reader_ptr(&reader), DRWAV_BEXT_ORIGINATOR_NAME_BYTES); drwav_buffer_reader_seek(&reader, DRWAV_BEXT_ORIGINATOR_NAME_BYTES); pMetadata->data.bext.pOriginatorReference = drwav__metadata_copy_string(pParser, (const char*)drwav_buffer_reader_ptr(&reader), DRWAV_BEXT_ORIGINATOR_REF_BYTES); drwav_buffer_reader_seek(&reader, DRWAV_BEXT_ORIGINATOR_REF_BYTES); drwav_buffer_reader_read(&reader, pMetadata->data.bext.pOriginationDate, sizeof(pMetadata->data.bext.pOriginationDate), NULL); drwav_buffer_reader_read(&reader, pMetadata->data.bext.pOriginationTime, sizeof(pMetadata->data.bext.pOriginationTime), NULL); drwav_buffer_reader_read_u32(&reader, &timeReferenceLow); drwav_buffer_reader_read_u32(&reader, &timeReferenceHigh); pMetadata->data.bext.timeReference = ((drwav_uint64)timeReferenceHigh << 32) + timeReferenceLow; 
drwav_buffer_reader_read_u16(&reader, &pMetadata->data.bext.version); pMetadata->data.bext.pUMID = drwav__metadata_get_memory(pParser, DRWAV_BEXT_UMID_BYTES, 1); drwav_buffer_reader_read(&reader, pMetadata->data.bext.pUMID, DRWAV_BEXT_UMID_BYTES, NULL); drwav_buffer_reader_read_u16(&reader, &pMetadata->data.bext.loudnessValue); drwav_buffer_reader_read_u16(&reader, &pMetadata->data.bext.loudnessRange); drwav_buffer_reader_read_u16(&reader, &pMetadata->data.bext.maxTruePeakLevel); drwav_buffer_reader_read_u16(&reader, &pMetadata->data.bext.maxMomentaryLoudness); drwav_buffer_reader_read_u16(&reader, &pMetadata->data.bext.maxShortTermLoudness); DRWAV_ASSERT((drwav_offset_ptr(drwav_buffer_reader_ptr(&reader), DRWAV_BEXT_RESERVED_BYTES)) == (bextData + DRWAV_BEXT_BYTES)); extraBytes = (size_t)(chunkSize - DRWAV_BEXT_BYTES); if (extraBytes > 0) { pMetadata->data.bext.pCodingHistory = (char*)drwav__metadata_get_memory(pParser, extraBytes + 1, 1); DRWAV_ASSERT(pMetadata->data.bext.pCodingHistory != NULL); bytesRead += drwav__metadata_parser_read(pParser, pMetadata->data.bext.pCodingHistory, extraBytes, NULL); pMetadata->data.bext.codingHistorySize = (drwav_uint32)drwav__strlen(pMetadata->data.bext.pCodingHistory); } else { pMetadata->data.bext.pCodingHistory = NULL; pMetadata->data.bext.codingHistorySize = 0; } } } return bytesRead; } DRWAV_PRIVATE drwav_uint64 drwav__read_list_label_or_note_to_metadata_obj(drwav__metadata_parser* pParser, drwav_metadata* pMetadata, drwav_uint64 chunkSize, drwav_metadata_type type) { drwav_uint8 cueIDBuffer[DRWAV_LIST_LABEL_OR_NOTE_BYTES]; drwav_uint64 totalBytesRead = 0; size_t bytesJustRead = drwav__metadata_parser_read(pParser, cueIDBuffer, sizeof(cueIDBuffer), &totalBytesRead); DRWAV_ASSERT(pParser->stage == drwav__metadata_parser_stage_read); if (bytesJustRead == sizeof(cueIDBuffer)) { drwav_uint32 sizeIncludingNullTerminator; pMetadata->type = type; pMetadata->data.labelOrNote.cuePointId = drwav_bytes_to_u32(cueIDBuffer); 
sizeIncludingNullTerminator = (drwav_uint32)chunkSize - DRWAV_LIST_LABEL_OR_NOTE_BYTES; if (sizeIncludingNullTerminator > 0) { pMetadata->data.labelOrNote.stringLength = sizeIncludingNullTerminator - 1; pMetadata->data.labelOrNote.pString = (char*)drwav__metadata_get_memory(pParser, sizeIncludingNullTerminator, 1); DRWAV_ASSERT(pMetadata->data.labelOrNote.pString != NULL); drwav__metadata_parser_read(pParser, pMetadata->data.labelOrNote.pString, sizeIncludingNullTerminator, &totalBytesRead); } else { pMetadata->data.labelOrNote.stringLength = 0; pMetadata->data.labelOrNote.pString = NULL; } } return totalBytesRead; } DRWAV_PRIVATE drwav_uint64 drwav__read_list_labelled_cue_region_to_metadata_obj(drwav__metadata_parser* pParser, drwav_metadata* pMetadata, drwav_uint64 chunkSize) { drwav_uint8 buffer[DRWAV_LIST_LABELLED_TEXT_BYTES]; drwav_uint64 totalBytesRead = 0; size_t bytesJustRead = drwav__metadata_parser_read(pParser, buffer, sizeof(buffer), &totalBytesRead); DRWAV_ASSERT(pParser->stage == drwav__metadata_parser_stage_read); if (bytesJustRead == sizeof(buffer)) { drwav_uint32 sizeIncludingNullTerminator; pMetadata->type = drwav_metadata_type_list_labelled_cue_region; pMetadata->data.labelledCueRegion.cuePointId = drwav_bytes_to_u32(buffer + 0); pMetadata->data.labelledCueRegion.sampleLength = drwav_bytes_to_u32(buffer + 4); pMetadata->data.labelledCueRegion.purposeId[0] = buffer[8]; pMetadata->data.labelledCueRegion.purposeId[1] = buffer[9]; pMetadata->data.labelledCueRegion.purposeId[2] = buffer[10]; pMetadata->data.labelledCueRegion.purposeId[3] = buffer[11]; pMetadata->data.labelledCueRegion.country = drwav_bytes_to_u16(buffer + 12); pMetadata->data.labelledCueRegion.language = drwav_bytes_to_u16(buffer + 14); pMetadata->data.labelledCueRegion.dialect = drwav_bytes_to_u16(buffer + 16); pMetadata->data.labelledCueRegion.codePage = drwav_bytes_to_u16(buffer + 18); sizeIncludingNullTerminator = (drwav_uint32)chunkSize - DRWAV_LIST_LABELLED_TEXT_BYTES; if 
(sizeIncludingNullTerminator > 0) { pMetadata->data.labelledCueRegion.stringLength = sizeIncludingNullTerminator - 1; pMetadata->data.labelledCueRegion.pString = (char*)drwav__metadata_get_memory(pParser, sizeIncludingNullTerminator, 1); DRWAV_ASSERT(pMetadata->data.labelledCueRegion.pString != NULL); drwav__metadata_parser_read(pParser, pMetadata->data.labelledCueRegion.pString, sizeIncludingNullTerminator, &totalBytesRead); } else { pMetadata->data.labelledCueRegion.stringLength = 0; pMetadata->data.labelledCueRegion.pString = NULL; } } return totalBytesRead; } DRWAV_PRIVATE drwav_uint64 drwav__metadata_process_info_text_chunk(drwav__metadata_parser* pParser, drwav_uint64 chunkSize, drwav_metadata_type type) { drwav_uint64 bytesRead = 0; drwav_uint32 stringSizeWithNullTerminator = (drwav_uint32)chunkSize; if (pParser->stage == drwav__metadata_parser_stage_count) { pParser->metadataCount += 1; drwav__metadata_request_extra_memory_for_stage_2(pParser, stringSizeWithNullTerminator, 1); } else { drwav_metadata* pMetadata = &pParser->pMetadata[pParser->metadataCursor]; pMetadata->type = type; if (stringSizeWithNullTerminator > 0) { pMetadata->data.infoText.stringLength = stringSizeWithNullTerminator - 1; pMetadata->data.infoText.pString = (char*)drwav__metadata_get_memory(pParser, stringSizeWithNullTerminator, 1); DRWAV_ASSERT(pMetadata->data.infoText.pString != NULL); bytesRead = drwav__metadata_parser_read(pParser, pMetadata->data.infoText.pString, (size_t)stringSizeWithNullTerminator, NULL); if (bytesRead == chunkSize) { pParser->metadataCursor += 1; } else { /* Failed to parse. 
*/ } } else { pMetadata->data.infoText.stringLength = 0; pMetadata->data.infoText.pString = NULL; pParser->metadataCursor += 1; } } return bytesRead; } DRWAV_PRIVATE drwav_uint64 drwav__metadata_process_unknown_chunk(drwav__metadata_parser* pParser, const drwav_uint8* pChunkId, drwav_uint64 chunkSize, drwav_metadata_location location) { drwav_uint64 bytesRead = 0; if (location == drwav_metadata_location_invalid) { return 0; } if (drwav_fourcc_equal(pChunkId, "data") || drwav_fourcc_equal(pChunkId, "fmt ") || drwav_fourcc_equal(pChunkId, "fact")) { return 0; } if (pParser->stage == drwav__metadata_parser_stage_count) { pParser->metadataCount += 1; drwav__metadata_request_extra_memory_for_stage_2(pParser, (size_t)chunkSize, 1); } else { drwav_metadata* pMetadata = &pParser->pMetadata[pParser->metadataCursor]; pMetadata->type = drwav_metadata_type_unknown; pMetadata->data.unknown.chunkLocation = location; pMetadata->data.unknown.id[0] = pChunkId[0]; pMetadata->data.unknown.id[1] = pChunkId[1]; pMetadata->data.unknown.id[2] = pChunkId[2]; pMetadata->data.unknown.id[3] = pChunkId[3]; pMetadata->data.unknown.dataSizeInBytes = (drwav_uint32)chunkSize; pMetadata->data.unknown.pData = (drwav_uint8 *)drwav__metadata_get_memory(pParser, (size_t)chunkSize, 1); DRWAV_ASSERT(pMetadata->data.unknown.pData != NULL); bytesRead = drwav__metadata_parser_read(pParser, pMetadata->data.unknown.pData, pMetadata->data.unknown.dataSizeInBytes, NULL); if (bytesRead == pMetadata->data.unknown.dataSizeInBytes) { pParser->metadataCursor += 1; } else { /* Failed to read. 
*/ } } return bytesRead; } DRWAV_PRIVATE drwav_bool32 drwav__chunk_matches(drwav_metadata_type allowedMetadataTypes, const drwav_uint8* pChunkID, drwav_metadata_type type, const char* pID) { return (allowedMetadataTypes & type) && drwav_fourcc_equal(pChunkID, pID); } DRWAV_PRIVATE drwav_uint64 drwav__metadata_process_chunk(drwav__metadata_parser* pParser, const drwav_chunk_header* pChunkHeader, drwav_metadata_type allowedMetadataTypes) { const drwav_uint8 *pChunkID = pChunkHeader->id.fourcc; drwav_uint64 bytesRead = 0; if (drwav__chunk_matches(allowedMetadataTypes, pChunkID, drwav_metadata_type_smpl, "smpl")) { if (pChunkHeader->sizeInBytes >= DRWAV_SMPL_BYTES) { if (pParser->stage == drwav__metadata_parser_stage_count) { drwav_uint8 buffer[4]; size_t bytesJustRead; if (!pParser->onSeek(pParser->pReadSeekUserData, 28, drwav_seek_origin_current)) { return bytesRead; } bytesRead += 28; bytesJustRead = drwav__metadata_parser_read(pParser, buffer, sizeof(buffer), &bytesRead); if (bytesJustRead == sizeof(buffer)) { drwav_uint32 loopCount = drwav_bytes_to_u32(buffer); drwav_uint64 calculatedLoopCount; /* The loop count must be validated against the size of the chunk. */ calculatedLoopCount = (pChunkHeader->sizeInBytes - DRWAV_SMPL_BYTES) / DRWAV_SMPL_LOOP_BYTES; if (calculatedLoopCount == loopCount) { bytesJustRead = drwav__metadata_parser_read(pParser, buffer, sizeof(buffer), &bytesRead); if (bytesJustRead == sizeof(buffer)) { drwav_uint32 samplerSpecificDataSizeInBytes = drwav_bytes_to_u32(buffer); pParser->metadataCount += 1; drwav__metadata_request_extra_memory_for_stage_2(pParser, sizeof(drwav_smpl_loop) * loopCount, DRWAV_METADATA_ALIGNMENT); drwav__metadata_request_extra_memory_for_stage_2(pParser, samplerSpecificDataSizeInBytes, 1); } } else { /* Loop count in header does not match the size of the chunk. 
*/ } } } else { bytesRead = drwav__read_smpl_to_metadata_obj(pParser, pChunkHeader, &pParser->pMetadata[pParser->metadataCursor]); if (bytesRead == pChunkHeader->sizeInBytes) { pParser->metadataCursor += 1; } else { /* Failed to parse. */ } } } else { /* Incorrectly formed chunk. */ } } else if (drwav__chunk_matches(allowedMetadataTypes, pChunkID, drwav_metadata_type_inst, "inst")) { if (pChunkHeader->sizeInBytes == DRWAV_INST_BYTES) { if (pParser->stage == drwav__metadata_parser_stage_count) { pParser->metadataCount += 1; } else { bytesRead = drwav__read_inst_to_metadata_obj(pParser, &pParser->pMetadata[pParser->metadataCursor]); if (bytesRead == pChunkHeader->sizeInBytes) { pParser->metadataCursor += 1; } else { /* Failed to parse. */ } } } else { /* Incorrectly formed chunk. */ } } else if (drwav__chunk_matches(allowedMetadataTypes, pChunkID, drwav_metadata_type_acid, "acid")) { if (pChunkHeader->sizeInBytes == DRWAV_ACID_BYTES) { if (pParser->stage == drwav__metadata_parser_stage_count) { pParser->metadataCount += 1; } else { bytesRead = drwav__read_acid_to_metadata_obj(pParser, &pParser->pMetadata[pParser->metadataCursor]); if (bytesRead == pChunkHeader->sizeInBytes) { pParser->metadataCursor += 1; } else { /* Failed to parse. */ } } } else { /* Incorrectly formed chunk. */ } } else if (drwav__chunk_matches(allowedMetadataTypes, pChunkID, drwav_metadata_type_cue, "cue ")) { if (pChunkHeader->sizeInBytes >= DRWAV_CUE_BYTES) { if (pParser->stage == drwav__metadata_parser_stage_count) { size_t cueCount; pParser->metadataCount += 1; cueCount = (size_t)(pChunkHeader->sizeInBytes - DRWAV_CUE_BYTES) / DRWAV_CUE_POINT_BYTES; drwav__metadata_request_extra_memory_for_stage_2(pParser, sizeof(drwav_cue_point) * cueCount, DRWAV_METADATA_ALIGNMENT); } else { bytesRead = drwav__read_cue_to_metadata_obj(pParser, pChunkHeader, &pParser->pMetadata[pParser->metadataCursor]); if (bytesRead == pChunkHeader->sizeInBytes) { pParser->metadataCursor += 1; } else { /* Failed to parse. 
*/ } } } else { /* Incorrectly formed chunk. */ } } else if (drwav__chunk_matches(allowedMetadataTypes, pChunkID, drwav_metadata_type_bext, "bext")) { if (pChunkHeader->sizeInBytes >= DRWAV_BEXT_BYTES) { if (pParser->stage == drwav__metadata_parser_stage_count) { /* The description field is the largest one in a bext chunk, so that is the max size of this temporary buffer. */ char buffer[DRWAV_BEXT_DESCRIPTION_BYTES + 1]; size_t allocSizeNeeded = DRWAV_BEXT_UMID_BYTES; /* We know we will need SMPTE umid size. */ size_t bytesJustRead; buffer[DRWAV_BEXT_DESCRIPTION_BYTES] = '\0'; bytesJustRead = drwav__metadata_parser_read(pParser, buffer, DRWAV_BEXT_DESCRIPTION_BYTES, &bytesRead); if (bytesJustRead != DRWAV_BEXT_DESCRIPTION_BYTES) { return bytesRead; } allocSizeNeeded += drwav__strlen(buffer) + 1; buffer[DRWAV_BEXT_ORIGINATOR_NAME_BYTES] = '\0'; bytesJustRead = drwav__metadata_parser_read(pParser, buffer, DRWAV_BEXT_ORIGINATOR_NAME_BYTES, &bytesRead); if (bytesJustRead != DRWAV_BEXT_ORIGINATOR_NAME_BYTES) { return bytesRead; } allocSizeNeeded += drwav__strlen(buffer) + 1; buffer[DRWAV_BEXT_ORIGINATOR_REF_BYTES] = '\0'; bytesJustRead = drwav__metadata_parser_read(pParser, buffer, DRWAV_BEXT_ORIGINATOR_REF_BYTES, &bytesRead); if (bytesJustRead != DRWAV_BEXT_ORIGINATOR_REF_BYTES) { return bytesRead; } allocSizeNeeded += drwav__strlen(buffer) + 1; allocSizeNeeded += (size_t)pChunkHeader->sizeInBytes - DRWAV_BEXT_BYTES; /* Coding history. */ drwav__metadata_request_extra_memory_for_stage_2(pParser, allocSizeNeeded, 1); pParser->metadataCount += 1; } else { bytesRead = drwav__read_bext_to_metadata_obj(pParser, &pParser->pMetadata[pParser->metadataCursor], pChunkHeader->sizeInBytes); if (bytesRead == pChunkHeader->sizeInBytes) { pParser->metadataCursor += 1; } else { /* Failed to parse. */ } } } else { /* Incorrectly formed chunk. 
*/ } } else if (drwav_fourcc_equal(pChunkID, "LIST") || drwav_fourcc_equal(pChunkID, "list")) { drwav_metadata_location listType = drwav_metadata_location_invalid; while (bytesRead < pChunkHeader->sizeInBytes) { drwav_uint8 subchunkId[4]; drwav_uint8 subchunkSizeBuffer[4]; drwav_uint64 subchunkDataSize; drwav_uint64 subchunkBytesRead = 0; drwav_uint64 bytesJustRead = drwav__metadata_parser_read(pParser, subchunkId, sizeof(subchunkId), &bytesRead); if (bytesJustRead != sizeof(subchunkId)) { break; } /* The first thing in a list chunk should be "adtl" or "INFO". - adtl means this list is a Associated Data List Chunk and will contain labels, notes or labelled cue regions. - INFO means this list is an Info List Chunk containing info text chunks such as IPRD which would specifies the album of this wav file. No data follows the adtl or INFO id so we just make note of what type this list is and continue. */ if (drwav_fourcc_equal(subchunkId, "adtl")) { listType = drwav_metadata_location_inside_adtl_list; continue; } else if (drwav_fourcc_equal(subchunkId, "INFO")) { listType = drwav_metadata_location_inside_info_list; continue; } bytesJustRead = drwav__metadata_parser_read(pParser, subchunkSizeBuffer, sizeof(subchunkSizeBuffer), &bytesRead); if (bytesJustRead != sizeof(subchunkSizeBuffer)) { break; } subchunkDataSize = drwav_bytes_to_u32(subchunkSizeBuffer); if (drwav__chunk_matches(allowedMetadataTypes, subchunkId, drwav_metadata_type_list_label, "labl") || drwav__chunk_matches(allowedMetadataTypes, subchunkId, drwav_metadata_type_list_note, "note")) { if (subchunkDataSize >= DRWAV_LIST_LABEL_OR_NOTE_BYTES) { drwav_uint64 stringSizeWithNullTerm = subchunkDataSize - DRWAV_LIST_LABEL_OR_NOTE_BYTES; if (pParser->stage == drwav__metadata_parser_stage_count) { pParser->metadataCount += 1; drwav__metadata_request_extra_memory_for_stage_2(pParser, (size_t)stringSizeWithNullTerm, 1); } else { subchunkBytesRead = drwav__read_list_label_or_note_to_metadata_obj(pParser, 
&pParser->pMetadata[pParser->metadataCursor], subchunkDataSize, drwav_fourcc_equal(subchunkId, "labl") ? drwav_metadata_type_list_label : drwav_metadata_type_list_note); if (subchunkBytesRead == subchunkDataSize) { pParser->metadataCursor += 1; } else { /* Failed to parse. */ } } } else { /* Incorrectly formed chunk. */ } } else if (drwav__chunk_matches(allowedMetadataTypes, subchunkId, drwav_metadata_type_list_labelled_cue_region, "ltxt")) { if (subchunkDataSize >= DRWAV_LIST_LABELLED_TEXT_BYTES) { drwav_uint64 stringSizeWithNullTerminator = subchunkDataSize - DRWAV_LIST_LABELLED_TEXT_BYTES; if (pParser->stage == drwav__metadata_parser_stage_count) { pParser->metadataCount += 1; drwav__metadata_request_extra_memory_for_stage_2(pParser, (size_t)stringSizeWithNullTerminator, 1); } else { subchunkBytesRead = drwav__read_list_labelled_cue_region_to_metadata_obj(pParser, &pParser->pMetadata[pParser->metadataCursor], subchunkDataSize); if (subchunkBytesRead == subchunkDataSize) { pParser->metadataCursor += 1; } else { /* Failed to parse. */ } } } else { /* Incorrectly formed chunk. 
*/ } } else if (drwav__chunk_matches(allowedMetadataTypes, subchunkId, drwav_metadata_type_list_info_software, "ISFT")) { subchunkBytesRead = drwav__metadata_process_info_text_chunk(pParser, subchunkDataSize, drwav_metadata_type_list_info_software); } else if (drwav__chunk_matches(allowedMetadataTypes, subchunkId, drwav_metadata_type_list_info_copyright, "ICOP")) { subchunkBytesRead = drwav__metadata_process_info_text_chunk(pParser, subchunkDataSize, drwav_metadata_type_list_info_copyright); } else if (drwav__chunk_matches(allowedMetadataTypes, subchunkId, drwav_metadata_type_list_info_title, "INAM")) { subchunkBytesRead = drwav__metadata_process_info_text_chunk(pParser, subchunkDataSize, drwav_metadata_type_list_info_title); } else if (drwav__chunk_matches(allowedMetadataTypes, subchunkId, drwav_metadata_type_list_info_artist, "IART")) { subchunkBytesRead = drwav__metadata_process_info_text_chunk(pParser, subchunkDataSize, drwav_metadata_type_list_info_artist); } else if (drwav__chunk_matches(allowedMetadataTypes, subchunkId, drwav_metadata_type_list_info_comment, "ICMT")) { subchunkBytesRead = drwav__metadata_process_info_text_chunk(pParser, subchunkDataSize, drwav_metadata_type_list_info_comment); } else if (drwav__chunk_matches(allowedMetadataTypes, subchunkId, drwav_metadata_type_list_info_date, "ICRD")) { subchunkBytesRead = drwav__metadata_process_info_text_chunk(pParser, subchunkDataSize, drwav_metadata_type_list_info_date); } else if (drwav__chunk_matches(allowedMetadataTypes, subchunkId, drwav_metadata_type_list_info_genre, "IGNR")) { subchunkBytesRead = drwav__metadata_process_info_text_chunk(pParser, subchunkDataSize, drwav_metadata_type_list_info_genre); } else if (drwav__chunk_matches(allowedMetadataTypes, subchunkId, drwav_metadata_type_list_info_album, "IPRD")) { subchunkBytesRead = drwav__metadata_process_info_text_chunk(pParser, subchunkDataSize, drwav_metadata_type_list_info_album); } else if (drwav__chunk_matches(allowedMetadataTypes, 
subchunkId, drwav_metadata_type_list_info_tracknumber, "ITRK")) { subchunkBytesRead = drwav__metadata_process_info_text_chunk(pParser, subchunkDataSize, drwav_metadata_type_list_info_tracknumber); } else if ((allowedMetadataTypes & drwav_metadata_type_unknown) != 0) { subchunkBytesRead = drwav__metadata_process_unknown_chunk(pParser, subchunkId, subchunkDataSize, listType); } bytesRead += subchunkBytesRead; DRWAV_ASSERT(subchunkBytesRead <= subchunkDataSize); if (subchunkBytesRead < subchunkDataSize) { drwav_uint64 bytesToSeek = subchunkDataSize - subchunkBytesRead; if (!pParser->onSeek(pParser->pReadSeekUserData, (int)bytesToSeek, drwav_seek_origin_current)) { break; } bytesRead += bytesToSeek; } if ((subchunkDataSize % 2) == 1) { if (!pParser->onSeek(pParser->pReadSeekUserData, 1, drwav_seek_origin_current)) { break; } bytesRead += 1; } } } else if ((allowedMetadataTypes & drwav_metadata_type_unknown) != 0) { bytesRead = drwav__metadata_process_unknown_chunk(pParser, pChunkID, pChunkHeader->sizeInBytes, drwav_metadata_location_top_level); } return bytesRead; } DRWAV_PRIVATE drwav_uint32 drwav_get_bytes_per_pcm_frame(drwav* pWav) { drwav_uint32 bytesPerFrame; /* The bytes per frame is a bit ambiguous. It can be either be based on the bits per sample, or the block align. The way I'm doing it here is that if the bits per sample is a multiple of 8, use floor(bitsPerSample*channels/8), otherwise fall back to the block align. */ if ((pWav->bitsPerSample & 0x7) == 0) { /* Bits per sample is a multiple of 8. */ bytesPerFrame = (pWav->bitsPerSample * pWav->fmt.channels) >> 3; } else { bytesPerFrame = pWav->fmt.blockAlign; } /* Validation for known formats. a-law and mu-law should be 1 byte per channel. If it's not, it's not decodable. */ if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ALAW || pWav->translatedFormatTag == DR_WAVE_FORMAT_MULAW) { if (bytesPerFrame != pWav->fmt.channels) { return 0; /* Invalid file. 
*/
        }
    }

    return bytesPerFrame;
}

/*
Returns the effective format tag of a fmt chunk. For WAVE_FORMAT_EXTENSIBLE the real
format lives in the first two bytes of the sub-format GUID, so that is returned instead
of the container-level tag. Returns 0 if pFMT is NULL.
*/
DRWAV_API drwav_uint16 drwav_fmt_get_format(const drwav_fmt* pFMT)
{
    if (pFMT == NULL) {
        return 0;
    }

    if (pFMT->formatTag != DR_WAVE_FORMAT_EXTENSIBLE) {
        return pFMT->formatTag;
    } else {
        return drwav_bytes_to_u16(pFMT->subFormat);    /* Only the first two bytes are required. */
    }
}

/*
First stage of initialization: validates the required callbacks, zeroes out the drwav
object and installs the read/seek callbacks, user data and allocation callbacks.
Returns DRWAV_FALSE if pWav, onRead or onSeek is NULL, or if the allocation callbacks
are unusable. drwav_init__internal() assumes this has succeeded beforehand.
*/
DRWAV_PRIVATE drwav_bool32 drwav_preinit(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, void* pReadSeekUserData, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    if (pWav == NULL || onRead == NULL || onSeek == NULL) {
        return DRWAV_FALSE;
    }

    DRWAV_ZERO_MEMORY(pWav, sizeof(*pWav));
    pWav->onRead    = onRead;
    pWav->onSeek    = onSeek;
    pWav->pUserData = pReadSeekUserData;
    pWav->allocationCallbacks = drwav_copy_allocation_callbacks_or_defaults(pAllocationCallbacks);

    /* To be usable we need onFree plus at least one of onMalloc/onRealloc. */
    if (pWav->allocationCallbacks.onFree == NULL || (pWav->allocationCallbacks.onMalloc == NULL && pWav->allocationCallbacks.onRealloc == NULL)) {
        return DRWAV_FALSE;    /* Invalid allocation callbacks. */
    }

    return DRWAV_TRUE;
}

/*
Second stage of initialization: identifies the container (RIFF/RIFX/riff-W64/RF64/AIFF),
iterates the chunks looking for "fmt " and "data" (and, when requested via flags,
metadata), then fills out the remainder of the drwav object. Assumes drwav_preinit()
has already been called on pWav.
*/
DRWAV_PRIVATE drwav_bool32 drwav_init__internal(drwav* pWav, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags)
{
    /* This function assumes drwav_preinit() has been called beforehand. */
    drwav_result result;
    drwav_uint64 cursor;    /* <-- Keeps track of the byte position so we can seek to specific locations. */
    drwav_bool32 sequential;
    drwav_uint8 riff[4];
    drwav_fmt fmt;
    unsigned short translatedFormatTag;
    drwav_uint64 dataChunkSize = 0; /* <-- Important! Don't explicitly set this to 0 anywhere else. Calculation of the size of the data chunk is performed in different paths depending on the container. */
    drwav_uint64 sampleCountFromFactChunk = 0;  /* Same as dataChunkSize - make sure this is the only place this is initialized to 0.
*/ drwav_uint64 metadataStartPos; drwav__metadata_parser metadataParser; drwav_bool8 isProcessingMetadata = DRWAV_FALSE; drwav_bool8 foundChunk_fmt = DRWAV_FALSE; drwav_bool8 foundChunk_data = DRWAV_FALSE; drwav_bool8 isAIFCFormType = DRWAV_FALSE; /* Only used with AIFF. */ drwav_uint64 aiffFrameCount = 0; cursor = 0; sequential = (flags & DRWAV_SEQUENTIAL) != 0; DRWAV_ZERO_OBJECT(&fmt); /* The first 4 bytes should be the RIFF identifier. */ if (drwav__on_read(pWav->onRead, pWav->pUserData, riff, sizeof(riff), &cursor) != sizeof(riff)) { return DRWAV_FALSE; } /* The first 4 bytes can be used to identify the container. For RIFF files it will start with "RIFF" and for w64 it will start with "riff". */ if (drwav_fourcc_equal(riff, "RIFF")) { pWav->container = drwav_container_riff; } else if (drwav_fourcc_equal(riff, "RIFX")) { pWav->container = drwav_container_rifx; } else if (drwav_fourcc_equal(riff, "riff")) { int i; drwav_uint8 riff2[12]; pWav->container = drwav_container_w64; /* Check the rest of the GUID for validity. */ if (drwav__on_read(pWav->onRead, pWav->pUserData, riff2, sizeof(riff2), &cursor) != sizeof(riff2)) { return DRWAV_FALSE; } for (i = 0; i < 12; ++i) { if (riff2[i] != drwavGUID_W64_RIFF[i+4]) { return DRWAV_FALSE; } } } else if (drwav_fourcc_equal(riff, "RF64")) { pWav->container = drwav_container_rf64; } else if (drwav_fourcc_equal(riff, "FORM")) { pWav->container = drwav_container_aiff; } else { return DRWAV_FALSE; /* Unknown or unsupported container. 
*/ } if (pWav->container == drwav_container_riff || pWav->container == drwav_container_rifx || pWav->container == drwav_container_rf64) { drwav_uint8 chunkSizeBytes[4]; drwav_uint8 wave[4]; if (drwav__on_read(pWav->onRead, pWav->pUserData, chunkSizeBytes, sizeof(chunkSizeBytes), &cursor) != sizeof(chunkSizeBytes)) { return DRWAV_FALSE; } if (pWav->container == drwav_container_riff || pWav->container == drwav_container_rifx) { if (drwav_bytes_to_u32_ex(chunkSizeBytes, pWav->container) < 36) { return DRWAV_FALSE; /* Chunk size should always be at least 36 bytes. */ } } else if (pWav->container == drwav_container_rf64) { if (drwav_bytes_to_u32_le(chunkSizeBytes) != 0xFFFFFFFF) { return DRWAV_FALSE; /* Chunk size should always be set to -1/0xFFFFFFFF for RF64. The actual size is retrieved later. */ } } else { return DRWAV_FALSE; /* Should never hit this. */ } if (drwav__on_read(pWav->onRead, pWav->pUserData, wave, sizeof(wave), &cursor) != sizeof(wave)) { return DRWAV_FALSE; } if (!drwav_fourcc_equal(wave, "WAVE")) { return DRWAV_FALSE; /* Expecting "WAVE". 
*/ } } else if (pWav->container == drwav_container_w64) { drwav_uint8 chunkSizeBytes[8]; drwav_uint8 wave[16]; if (drwav__on_read(pWav->onRead, pWav->pUserData, chunkSizeBytes, sizeof(chunkSizeBytes), &cursor) != sizeof(chunkSizeBytes)) { return DRWAV_FALSE; } if (drwav_bytes_to_u64(chunkSizeBytes) < 80) { return DRWAV_FALSE; } if (drwav__on_read(pWav->onRead, pWav->pUserData, wave, sizeof(wave), &cursor) != sizeof(wave)) { return DRWAV_FALSE; } if (!drwav_guid_equal(wave, drwavGUID_W64_WAVE)) { return DRWAV_FALSE; } } else if (pWav->container == drwav_container_aiff) { drwav_uint8 chunkSizeBytes[4]; drwav_uint8 aiff[4]; if (drwav__on_read(pWav->onRead, pWav->pUserData, chunkSizeBytes, sizeof(chunkSizeBytes), &cursor) != sizeof(chunkSizeBytes)) { return DRWAV_FALSE; } if (drwav_bytes_to_u32_be(chunkSizeBytes) < 18) { return DRWAV_FALSE; } if (drwav__on_read(pWav->onRead, pWav->pUserData, aiff, sizeof(aiff), &cursor) != sizeof(aiff)) { return DRWAV_FALSE; } if (drwav_fourcc_equal(aiff, "AIFF")) { isAIFCFormType = DRWAV_FALSE; } else if (drwav_fourcc_equal(aiff, "AIFC")) { isAIFCFormType = DRWAV_TRUE; } else { return DRWAV_FALSE; /* Expecting "AIFF" or "AIFC". */ } } else { return DRWAV_FALSE; } /* For RF64, the "ds64" chunk must come next, before the "fmt " chunk. */ if (pWav->container == drwav_container_rf64) { drwav_uint8 sizeBytes[8]; drwav_uint64 bytesRemainingInChunk; drwav_chunk_header header; result = drwav__read_chunk_header(pWav->onRead, pWav->pUserData, pWav->container, &cursor, &header); if (result != DRWAV_SUCCESS) { return DRWAV_FALSE; } if (!drwav_fourcc_equal(header.id.fourcc, "ds64")) { return DRWAV_FALSE; /* Expecting "ds64". */ } bytesRemainingInChunk = header.sizeInBytes + header.paddingSize; /* We don't care about the size of the RIFF chunk - skip it. */ if (!drwav__seek_forward(pWav->onSeek, 8, pWav->pUserData)) { return DRWAV_FALSE; } bytesRemainingInChunk -= 8; cursor += 8; /* Next 8 bytes is the size of the "data" chunk. 
*/ if (drwav__on_read(pWav->onRead, pWav->pUserData, sizeBytes, sizeof(sizeBytes), &cursor) != sizeof(sizeBytes)) { return DRWAV_FALSE; } bytesRemainingInChunk -= 8; dataChunkSize = drwav_bytes_to_u64(sizeBytes); /* Next 8 bytes is the same count which we would usually derived from the FACT chunk if it was available. */ if (drwav__on_read(pWav->onRead, pWav->pUserData, sizeBytes, sizeof(sizeBytes), &cursor) != sizeof(sizeBytes)) { return DRWAV_FALSE; } bytesRemainingInChunk -= 8; sampleCountFromFactChunk = drwav_bytes_to_u64(sizeBytes); /* Skip over everything else. */ if (!drwav__seek_forward(pWav->onSeek, bytesRemainingInChunk, pWav->pUserData)) { return DRWAV_FALSE; } cursor += bytesRemainingInChunk; } metadataStartPos = cursor; /* Whether or not we are processing metadata controls how we load. We can load more efficiently when metadata is not being processed, but we also cannot process metadata for Wave64 because I have not been able to test it. If someone is able to test this and provide a patch I'm happy to enable it. Seqential mode cannot support metadata because it involves seeking backwards. */ isProcessingMetadata = !sequential && ((flags & DRWAV_WITH_METADATA) != 0); /* Don't allow processing of metadata with untested containers. */ if (pWav->container != drwav_container_riff && pWav->container != drwav_container_rf64) { isProcessingMetadata = DRWAV_FALSE; } DRWAV_ZERO_MEMORY(&metadataParser, sizeof(metadataParser)); if (isProcessingMetadata) { metadataParser.onRead = pWav->onRead; metadataParser.onSeek = pWav->onSeek; metadataParser.pReadSeekUserData = pWav->pUserData; metadataParser.stage = drwav__metadata_parser_stage_count; } /* From here on out, chunks might be in any order. In order to robustly handle metadata we'll need to loop through every chunk and handle them as we find them. In sequential mode we need to get out of the loop as soon as we find the data chunk because we won't be able to seek back. */ for (;;) { /* For each chunk... 
*/ drwav_chunk_header header; drwav_uint64 chunkSize; result = drwav__read_chunk_header(pWav->onRead, pWav->pUserData, pWav->container, &cursor, &header); if (result != DRWAV_SUCCESS) { break; } chunkSize = header.sizeInBytes; /* Always tell the caller about this chunk. We cannot do this in sequential mode because the callback is allowed to read from the file, in which case we'll need to rewind. */ if (!sequential && onChunk != NULL) { drwav_uint64 callbackBytesRead = onChunk(pChunkUserData, pWav->onRead, pWav->onSeek, pWav->pUserData, &header, pWav->container, &fmt); /* dr_wav may need to read the contents of the chunk, so we now need to seek back to the position before we called the callback. */ if (callbackBytesRead > 0) { if (drwav__seek_from_start(pWav->onSeek, cursor, pWav->pUserData) == DRWAV_FALSE) { return DRWAV_FALSE; } } } /* Explicitly handle known chunks first. */ /* "fmt " */ if (((pWav->container == drwav_container_riff || pWav->container == drwav_container_rifx || pWav->container == drwav_container_rf64) && drwav_fourcc_equal(header.id.fourcc, "fmt ")) || ((pWav->container == drwav_container_w64) && drwav_guid_equal(header.id.guid, drwavGUID_W64_FMT))) { drwav_uint8 fmtData[16]; foundChunk_fmt = DRWAV_TRUE; if (pWav->onRead(pWav->pUserData, fmtData, sizeof(fmtData)) != sizeof(fmtData)) { return DRWAV_FALSE; } cursor += sizeof(fmtData); fmt.formatTag = drwav_bytes_to_u16_ex(fmtData + 0, pWav->container); fmt.channels = drwav_bytes_to_u16_ex(fmtData + 2, pWav->container); fmt.sampleRate = drwav_bytes_to_u32_ex(fmtData + 4, pWav->container); fmt.avgBytesPerSec = drwav_bytes_to_u32_ex(fmtData + 8, pWav->container); fmt.blockAlign = drwav_bytes_to_u16_ex(fmtData + 12, pWav->container); fmt.bitsPerSample = drwav_bytes_to_u16_ex(fmtData + 14, pWav->container); fmt.extendedSize = 0; fmt.validBitsPerSample = 0; fmt.channelMask = 0; DRWAV_ZERO_MEMORY(fmt.subFormat, sizeof(fmt.subFormat)); if (header.sizeInBytes > 16) { drwav_uint8 fmt_cbSize[2]; int 
bytesReadSoFar = 0; if (pWav->onRead(pWav->pUserData, fmt_cbSize, sizeof(fmt_cbSize)) != sizeof(fmt_cbSize)) { return DRWAV_FALSE; /* Expecting more data. */ } cursor += sizeof(fmt_cbSize); bytesReadSoFar = 18; fmt.extendedSize = drwav_bytes_to_u16_ex(fmt_cbSize, pWav->container); if (fmt.extendedSize > 0) { /* Simple validation. */ if (fmt.formatTag == DR_WAVE_FORMAT_EXTENSIBLE) { if (fmt.extendedSize != 22) { return DRWAV_FALSE; } } if (fmt.formatTag == DR_WAVE_FORMAT_EXTENSIBLE) { drwav_uint8 fmtext[22]; if (pWav->onRead(pWav->pUserData, fmtext, fmt.extendedSize) != fmt.extendedSize) { return DRWAV_FALSE; /* Expecting more data. */ } fmt.validBitsPerSample = drwav_bytes_to_u16_ex(fmtext + 0, pWav->container); fmt.channelMask = drwav_bytes_to_u32_ex(fmtext + 2, pWav->container); drwav_bytes_to_guid(fmtext + 6, fmt.subFormat); } else { if (pWav->onSeek(pWav->pUserData, fmt.extendedSize, drwav_seek_origin_current) == DRWAV_FALSE) { return DRWAV_FALSE; } } cursor += fmt.extendedSize; bytesReadSoFar += fmt.extendedSize; } /* Seek past any leftover bytes. For w64 the leftover will be defined based on the chunk size. */ if (pWav->onSeek(pWav->pUserData, (int)(header.sizeInBytes - bytesReadSoFar), drwav_seek_origin_current) == DRWAV_FALSE) { return DRWAV_FALSE; } cursor += (header.sizeInBytes - bytesReadSoFar); } if (header.paddingSize > 0) { if (drwav__seek_forward(pWav->onSeek, header.paddingSize, pWav->pUserData) == DRWAV_FALSE) { break; } cursor += header.paddingSize; } /* Go to the next chunk. Don't include this chunk in metadata. 
*/ continue; } /* "data" */ if (((pWav->container == drwav_container_riff || pWav->container == drwav_container_rifx || pWav->container == drwav_container_rf64) && drwav_fourcc_equal(header.id.fourcc, "data")) || ((pWav->container == drwav_container_w64) && drwav_guid_equal(header.id.guid, drwavGUID_W64_DATA))) { foundChunk_data = DRWAV_TRUE; pWav->dataChunkDataPos = cursor; if (pWav->container != drwav_container_rf64) { /* The data chunk size for RF64 will always be set to 0xFFFFFFFF here. It was set to it's true value earlier. */ dataChunkSize = chunkSize; } /* If we're running in sequential mode, or we're not reading metadata, we have enough now that we can get out of the loop. */ if (sequential || !isProcessingMetadata) { break; /* No need to keep reading beyond the data chunk. */ } else { chunkSize += header.paddingSize; /* <-- Make sure we seek past the padding. */ if (drwav__seek_forward(pWav->onSeek, chunkSize, pWav->pUserData) == DRWAV_FALSE) { break; } cursor += chunkSize; continue; /* There may be some more metadata to read. */ } } /* "fact". This is optional. Can use this to get the sample count which is useful for compressed formats. For RF64 we retrieved the sample count from the ds64 chunk earlier. */ if (((pWav->container == drwav_container_riff || pWav->container == drwav_container_rifx || pWav->container == drwav_container_rf64) && drwav_fourcc_equal(header.id.fourcc, "fact")) || ((pWav->container == drwav_container_w64) && drwav_guid_equal(header.id.guid, drwavGUID_W64_FACT))) { if (pWav->container == drwav_container_riff || pWav->container == drwav_container_rifx) { drwav_uint8 sampleCount[4]; if (drwav__on_read(pWav->onRead, pWav->pUserData, &sampleCount, 4, &cursor) != 4) { return DRWAV_FALSE; } chunkSize -= 4; /* The sample count in the "fact" chunk is either unreliable, or I'm not understanding it properly. For now I am only enabling this for Microsoft ADPCM formats. 
*/ if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM) { sampleCountFromFactChunk = drwav_bytes_to_u32_ex(sampleCount, pWav->container); } else { sampleCountFromFactChunk = 0; } } else if (pWav->container == drwav_container_w64) { if (drwav__on_read(pWav->onRead, pWav->pUserData, &sampleCountFromFactChunk, 8, &cursor) != 8) { return DRWAV_FALSE; } chunkSize -= 8; } else if (pWav->container == drwav_container_rf64) { /* We retrieved the sample count from the ds64 chunk earlier so no need to do that here. */ } /* Seek to the next chunk in preparation for the next iteration. */ chunkSize += header.paddingSize; /* <-- Make sure we seek past the padding. */ if (drwav__seek_forward(pWav->onSeek, chunkSize, pWav->pUserData) == DRWAV_FALSE) { break; } cursor += chunkSize; continue; } /* "COMM". AIFF/AIFC only. */ if (pWav->container == drwav_container_aiff && drwav_fourcc_equal(header.id.fourcc, "COMM")) { drwav_uint8 commData[24]; drwav_uint32 commDataBytesToRead; drwav_uint16 channels; drwav_uint32 frameCount; drwav_uint16 sampleSizeInBits; drwav_int64 sampleRate; drwav_uint16 compressionFormat; foundChunk_fmt = DRWAV_TRUE; if (isAIFCFormType) { commDataBytesToRead = 24; if (header.sizeInBytes < commDataBytesToRead) { return DRWAV_FALSE; /* Invalid COMM chunk. */ } } else { commDataBytesToRead = 18; if (header.sizeInBytes != commDataBytesToRead) { return DRWAV_FALSE; /* INVALID COMM chunk. */ } } if (drwav__on_read(pWav->onRead, pWav->pUserData, commData, commDataBytesToRead, &cursor) != commDataBytesToRead) { return DRWAV_FALSE; } channels = drwav_bytes_to_u16_ex (commData + 0, pWav->container); frameCount = drwav_bytes_to_u32_ex (commData + 2, pWav->container); sampleSizeInBits = drwav_bytes_to_u16_ex (commData + 6, pWav->container); sampleRate = drwav_aiff_extented_to_s64(commData + 8); if (sampleRate < 0 || sampleRate > 0xFFFFFFFF) { return DRWAV_FALSE; /* Invalid sample rate. 
*/ } if (isAIFCFormType) { const drwav_uint8* type = commData + 18; if (drwav_fourcc_equal(type, "NONE")) { compressionFormat = DR_WAVE_FORMAT_PCM; /* PCM, big-endian. */ } else if (drwav_fourcc_equal(type, "raw ")) { compressionFormat = DR_WAVE_FORMAT_PCM; /* In my testing, it looks like when the "raw " compression type is used, 8-bit samples should be considered unsigned. */ if (sampleSizeInBits == 8) { pWav->aiff.isUnsigned = DRWAV_TRUE; } } else if (drwav_fourcc_equal(type, "sowt")) { compressionFormat = DR_WAVE_FORMAT_PCM; /* PCM, little-endian. */ pWav->aiff.isLE = DRWAV_TRUE; } else if (drwav_fourcc_equal(type, "fl32") || drwav_fourcc_equal(type, "fl64") || drwav_fourcc_equal(type, "FL32") || drwav_fourcc_equal(type, "FL64")) { compressionFormat = DR_WAVE_FORMAT_IEEE_FLOAT; } else if (drwav_fourcc_equal(type, "alaw") || drwav_fourcc_equal(type, "ALAW")) { compressionFormat = DR_WAVE_FORMAT_ALAW; } else if (drwav_fourcc_equal(type, "ulaw") || drwav_fourcc_equal(type, "ULAW")) { compressionFormat = DR_WAVE_FORMAT_MULAW; } else if (drwav_fourcc_equal(type, "ima4")) { compressionFormat = DR_WAVE_FORMAT_DVI_ADPCM; sampleSizeInBits = 4; /* I haven't been able to figure out how to get correct decoding for IMA ADPCM. Until this is figured out we'll need to abort when we encounter such an encoding. Advice welcome! */ return DRWAV_FALSE; } else { return DRWAV_FALSE; /* Unknown or unsupported compression format. Need to abort. */ } } else { compressionFormat = DR_WAVE_FORMAT_PCM; /* It's a standard AIFF form which is always compressed. */ } /* With AIFF we want to use the explicitly defined frame count rather than deriving it from the size of the chunk. */ aiffFrameCount = frameCount; /* We should now have enough information to fill out our fmt structure. 
*/ fmt.formatTag = compressionFormat; fmt.channels = channels; fmt.sampleRate = (drwav_uint32)sampleRate; fmt.bitsPerSample = sampleSizeInBits; fmt.blockAlign = (drwav_uint16)(fmt.channels * fmt.bitsPerSample / 8); fmt.avgBytesPerSec = fmt.blockAlign * fmt.sampleRate; if (fmt.blockAlign == 0 && compressionFormat == DR_WAVE_FORMAT_DVI_ADPCM) { fmt.blockAlign = 34 * fmt.channels; } /* Weird one. I've seen some alaw and ulaw encoded files that for some reason set the bits per sample to 16 when it should be 8. To get this working I need to explicitly check for this and change it. */ if (compressionFormat == DR_WAVE_FORMAT_ALAW || compressionFormat == DR_WAVE_FORMAT_MULAW) { if (fmt.bitsPerSample > 8) { fmt.bitsPerSample = 8; fmt.blockAlign = fmt.channels; } } /* In AIFF, samples are padded to 8 byte boundaries. We need to round up our bits per sample here. */ fmt.bitsPerSample += (fmt.bitsPerSample & 7); /* If the form type is AIFC there will be some additional data in the chunk. We need to seek past it. */ if (isAIFCFormType) { if (drwav__seek_forward(pWav->onSeek, (chunkSize - commDataBytesToRead), pWav->pUserData) == DRWAV_FALSE) { return DRWAV_FALSE; } cursor += (chunkSize - commDataBytesToRead); } /* Don't fall through or else we'll end up treating this chunk as metadata which is incorrect. */ continue; } /* "SSND". AIFF/AIFC only. This is the AIFF equivalent of the "data" chunk. */ if (pWav->container == drwav_container_aiff && drwav_fourcc_equal(header.id.fourcc, "SSND")) { drwav_uint8 offsetAndBlockSizeData[8]; drwav_uint32 offset; foundChunk_data = DRWAV_TRUE; if (drwav__on_read(pWav->onRead, pWav->pUserData, offsetAndBlockSizeData, sizeof(offsetAndBlockSizeData), &cursor) != sizeof(offsetAndBlockSizeData)) { return DRWAV_FALSE; } /* We need to seek forward by the offset. 
*/ offset = drwav_bytes_to_u32_ex(offsetAndBlockSizeData + 0, pWav->container); if (drwav__seek_forward(pWav->onSeek, offset, pWav->pUserData) == DRWAV_FALSE) { return DRWAV_FALSE; } cursor += offset; pWav->dataChunkDataPos = cursor; dataChunkSize = chunkSize; /* If we're running in sequential mode, or we're not reading metadata, we have enough now that we can get out of the loop. */ if (sequential || !isProcessingMetadata) { break; /* No need to keep reading beyond the data chunk. */ } else { if (drwav__seek_forward(pWav->onSeek, chunkSize, pWav->pUserData) == DRWAV_FALSE) { break; } cursor += chunkSize; continue; /* There may be some more metadata to read. */ } } /* Getting here means it's not a chunk that we care about internally, but might need to be handled as metadata by the caller. */ if (isProcessingMetadata) { drwav__metadata_process_chunk(&metadataParser, &header, drwav_metadata_type_all_including_unknown); /* Go back to the start of the chunk so we can normalize the position of the cursor. */ if (drwav__seek_from_start(pWav->onSeek, cursor, pWav->pUserData) == DRWAV_FALSE) { break; /* Failed to seek. Can't reliable read the remaining chunks. Get out. */ } } /* Make sure we skip past the content of this chunk before we go to the next one. */ chunkSize += header.paddingSize; /* <-- Make sure we seek past the padding. */ if (drwav__seek_forward(pWav->onSeek, chunkSize, pWav->pUserData) == DRWAV_FALSE) { break; } cursor += chunkSize; } /* There's some mandatory chunks that must exist. If they were not found in the iteration above we must abort. */ if (!foundChunk_fmt || !foundChunk_data) { return DRWAV_FALSE; } /* Basic validation. */ if ((fmt.sampleRate == 0 || fmt.sampleRate > DRWAV_MAX_SAMPLE_RATE ) || (fmt.channels == 0 || fmt.channels > DRWAV_MAX_CHANNELS ) || (fmt.bitsPerSample == 0 || fmt.bitsPerSample > DRWAV_MAX_BITS_PER_SAMPLE) || fmt.blockAlign == 0) { return DRWAV_FALSE; /* Probably an invalid WAV file. */ } /* Translate the internal format. 
*/ translatedFormatTag = fmt.formatTag; if (translatedFormatTag == DR_WAVE_FORMAT_EXTENSIBLE) { translatedFormatTag = drwav_bytes_to_u16_ex(fmt.subFormat + 0, pWav->container); } /* We may have moved passed the data chunk. If so we need to move back. If running in sequential mode we can assume we are already sitting on the data chunk. */ if (!sequential) { if (!drwav__seek_from_start(pWav->onSeek, pWav->dataChunkDataPos, pWav->pUserData)) { return DRWAV_FALSE; } cursor = pWav->dataChunkDataPos; } /* At this point we should have done the initial parsing of each of our chunks, but we now need to do a second pass to extract the actual contents of the metadata (the first pass just calculated the length of the memory allocation). We only do this if we've actually got metadata to parse. */ if (isProcessingMetadata && metadataParser.metadataCount > 0) { if (drwav__seek_from_start(pWav->onSeek, metadataStartPos, pWav->pUserData) == DRWAV_FALSE) { return DRWAV_FALSE; } result = drwav__metadata_alloc(&metadataParser, &pWav->allocationCallbacks); if (result != DRWAV_SUCCESS) { return DRWAV_FALSE; } metadataParser.stage = drwav__metadata_parser_stage_read; for (;;) { drwav_chunk_header header; drwav_uint64 metadataBytesRead; result = drwav__read_chunk_header(pWav->onRead, pWav->pUserData, pWav->container, &cursor, &header); if (result != DRWAV_SUCCESS) { break; } metadataBytesRead = drwav__metadata_process_chunk(&metadataParser, &header, drwav_metadata_type_all_including_unknown); /* Move to the end of the chunk so we can keep iterating. */ if (drwav__seek_forward(pWav->onSeek, (header.sizeInBytes + header.paddingSize) - metadataBytesRead, pWav->pUserData) == DRWAV_FALSE) { drwav_free(metadataParser.pMetadata, &pWav->allocationCallbacks); return DRWAV_FALSE; } } /* Getting here means we're finished parsing the metadata. 
*/ pWav->pMetadata = metadataParser.pMetadata; pWav->metadataCount = metadataParser.metadataCount; } /* At this point we should be sitting on the first byte of the raw audio data. */ /* I've seen a WAV file in the wild where a RIFF-ecapsulated file has the size of it's "RIFF" and "data" chunks set to 0xFFFFFFFF when the file is definitely not that big. In this case we're going to have to calculate the size by reading and discarding bytes, and then seeking back. We cannot do this in sequential mode. We just assume that the rest of the file is audio data. */ if (dataChunkSize == 0xFFFFFFFF && (pWav->container == drwav_container_riff || pWav->container == drwav_container_rifx) && pWav->isSequentialWrite == DRWAV_FALSE) { dataChunkSize = 0; for (;;) { drwav_uint8 temp[4096]; size_t bytesRead = pWav->onRead(pWav->pUserData, temp, sizeof(temp)); dataChunkSize += bytesRead; if (bytesRead < sizeof(temp)) { break; } } } if (drwav__seek_from_start(pWav->onSeek, pWav->dataChunkDataPos, pWav->pUserData) == DRWAV_FALSE) { drwav_free(pWav->pMetadata, &pWav->allocationCallbacks); return DRWAV_FALSE; } pWav->fmt = fmt; pWav->sampleRate = fmt.sampleRate; pWav->channels = fmt.channels; pWav->bitsPerSample = fmt.bitsPerSample; pWav->bytesRemaining = dataChunkSize; pWav->translatedFormatTag = translatedFormatTag; pWav->dataChunkDataSize = dataChunkSize; if (sampleCountFromFactChunk != 0) { pWav->totalPCMFrameCount = sampleCountFromFactChunk; } else if (aiffFrameCount != 0) { pWav->totalPCMFrameCount = aiffFrameCount; } else { drwav_uint32 bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); if (bytesPerFrame == 0) { drwav_free(pWav->pMetadata, &pWav->allocationCallbacks); return DRWAV_FALSE; /* Invalid file. */ } pWav->totalPCMFrameCount = dataChunkSize / bytesPerFrame; if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM) { drwav_uint64 totalBlockHeaderSizeInBytes; drwav_uint64 blockCount = dataChunkSize / fmt.blockAlign; /* Make sure any trailing partial block is accounted for. 
*/ if ((blockCount * fmt.blockAlign) < dataChunkSize) { blockCount += 1; } /* We decode two samples per byte. There will be blockCount headers in the data chunk. This is enough to know how to calculate the total PCM frame count. */ totalBlockHeaderSizeInBytes = blockCount * (6*fmt.channels); pWav->totalPCMFrameCount = ((dataChunkSize - totalBlockHeaderSizeInBytes) * 2) / fmt.channels; } if (pWav->translatedFormatTag == DR_WAVE_FORMAT_DVI_ADPCM) { drwav_uint64 totalBlockHeaderSizeInBytes; drwav_uint64 blockCount = dataChunkSize / fmt.blockAlign; /* Make sure any trailing partial block is accounted for. */ if ((blockCount * fmt.blockAlign) < dataChunkSize) { blockCount += 1; } /* We decode two samples per byte. There will be blockCount headers in the data chunk. This is enough to know how to calculate the total PCM frame count. */ totalBlockHeaderSizeInBytes = blockCount * (4*fmt.channels); pWav->totalPCMFrameCount = ((dataChunkSize - totalBlockHeaderSizeInBytes) * 2) / fmt.channels; /* The header includes a decoded sample for each channel which acts as the initial predictor sample. */ pWav->totalPCMFrameCount += blockCount; } } /* Some formats only support a certain number of channels. */ if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM || pWav->translatedFormatTag == DR_WAVE_FORMAT_DVI_ADPCM) { if (pWav->channels > 2) { drwav_free(pWav->pMetadata, &pWav->allocationCallbacks); return DRWAV_FALSE; } } /* The number of bytes per frame must be known. If not, it's an invalid file and not decodable. */ if (drwav_get_bytes_per_pcm_frame(pWav) == 0) { drwav_free(pWav->pMetadata, &pWav->allocationCallbacks); return DRWAV_FALSE; } #ifdef DR_WAV_LIBSNDFILE_COMPAT /* I use libsndfile as a benchmark for testing, however in the version I'm using (from the Windows installer on the libsndfile website), it appears the total sample count libsndfile uses for MS-ADPCM is incorrect. 
It would seem they are computing the total sample count from the number of blocks, however this results in the inclusion of extra silent samples at the end of the last block. The correct way to know the total sample count is to inspect the "fact" chunk, which should always be present for compressed formats, and should always include the sample count. This little block of code below is only used to emulate the libsndfile logic so I can properly run my correctness tests against libsndfile, and is disabled by default. */ if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM) { drwav_uint64 blockCount = dataChunkSize / fmt.blockAlign; pWav->totalPCMFrameCount = (((blockCount * (fmt.blockAlign - (6*pWav->channels))) * 2)) / fmt.channels; /* x2 because two samples per byte. */ } if (pWav->translatedFormatTag == DR_WAVE_FORMAT_DVI_ADPCM) { drwav_uint64 blockCount = dataChunkSize / fmt.blockAlign; pWav->totalPCMFrameCount = (((blockCount * (fmt.blockAlign - (4*pWav->channels))) * 2) + (blockCount * pWav->channels)) / fmt.channels; } #endif return DRWAV_TRUE; } DRWAV_API drwav_bool32 drwav_init(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks) { return drwav_init_ex(pWav, onRead, onSeek, NULL, pUserData, NULL, 0, pAllocationCallbacks); } DRWAV_API drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, drwav_chunk_proc onChunk, void* pReadSeekUserData, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks) { if (!drwav_preinit(pWav, onRead, onSeek, pReadSeekUserData, pAllocationCallbacks)) { return DRWAV_FALSE; } return drwav_init__internal(pWav, onChunk, pChunkUserData, flags); } DRWAV_API drwav_bool32 drwav_init_with_metadata(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks) { if (!drwav_preinit(pWav, onRead, 
onSeek, pUserData, pAllocationCallbacks)) { return DRWAV_FALSE; } return drwav_init__internal(pWav, NULL, NULL, flags | DRWAV_WITH_METADATA); } DRWAV_API drwav_metadata* drwav_take_ownership_of_metadata(drwav* pWav) { drwav_metadata *result = pWav->pMetadata; pWav->pMetadata = NULL; pWav->metadataCount = 0; return result; } DRWAV_PRIVATE size_t drwav__write(drwav* pWav, const void* pData, size_t dataSize) { DRWAV_ASSERT(pWav != NULL); DRWAV_ASSERT(pWav->onWrite != NULL); /* Generic write. Assumes no byte reordering required. */ return pWav->onWrite(pWav->pUserData, pData, dataSize); } DRWAV_PRIVATE size_t drwav__write_byte(drwav* pWav, drwav_uint8 byte) { DRWAV_ASSERT(pWav != NULL); DRWAV_ASSERT(pWav->onWrite != NULL); return pWav->onWrite(pWav->pUserData, &byte, 1); } DRWAV_PRIVATE size_t drwav__write_u16ne_to_le(drwav* pWav, drwav_uint16 value) { DRWAV_ASSERT(pWav != NULL); DRWAV_ASSERT(pWav->onWrite != NULL); if (!drwav__is_little_endian()) { value = drwav__bswap16(value); } return drwav__write(pWav, &value, 2); } DRWAV_PRIVATE size_t drwav__write_u32ne_to_le(drwav* pWav, drwav_uint32 value) { DRWAV_ASSERT(pWav != NULL); DRWAV_ASSERT(pWav->onWrite != NULL); if (!drwav__is_little_endian()) { value = drwav__bswap32(value); } return drwav__write(pWav, &value, 4); } DRWAV_PRIVATE size_t drwav__write_u64ne_to_le(drwav* pWav, drwav_uint64 value) { DRWAV_ASSERT(pWav != NULL); DRWAV_ASSERT(pWav->onWrite != NULL); if (!drwav__is_little_endian()) { value = drwav__bswap64(value); } return drwav__write(pWav, &value, 8); } DRWAV_PRIVATE size_t drwav__write_f32ne_to_le(drwav* pWav, float value) { union { drwav_uint32 u32; float f32; } u; DRWAV_ASSERT(pWav != NULL); DRWAV_ASSERT(pWav->onWrite != NULL); u.f32 = value; if (!drwav__is_little_endian()) { u.u32 = drwav__bswap32(u.u32); } return drwav__write(pWav, &u.u32, 4); } DRWAV_PRIVATE size_t drwav__write_or_count(drwav* pWav, const void* pData, size_t dataSize) { if (pWav == NULL) { return dataSize; } return 
drwav__write(pWav, pData, dataSize); } DRWAV_PRIVATE size_t drwav__write_or_count_byte(drwav* pWav, drwav_uint8 byte) { if (pWav == NULL) { return 1; } return drwav__write_byte(pWav, byte); } DRWAV_PRIVATE size_t drwav__write_or_count_u16ne_to_le(drwav* pWav, drwav_uint16 value) { if (pWav == NULL) { return 2; } return drwav__write_u16ne_to_le(pWav, value); } DRWAV_PRIVATE size_t drwav__write_or_count_u32ne_to_le(drwav* pWav, drwav_uint32 value) { if (pWav == NULL) { return 4; } return drwav__write_u32ne_to_le(pWav, value); } #if 0 /* Unused for now. */ DRWAV_PRIVATE size_t drwav__write_or_count_u64ne_to_le(drwav* pWav, drwav_uint64 value) { if (pWav == NULL) { return 8; } return drwav__write_u64ne_to_le(pWav, value); } #endif DRWAV_PRIVATE size_t drwav__write_or_count_f32ne_to_le(drwav* pWav, float value) { if (pWav == NULL) { return 4; } return drwav__write_f32ne_to_le(pWav, value); } DRWAV_PRIVATE size_t drwav__write_or_count_string_to_fixed_size_buf(drwav* pWav, char* str, size_t bufFixedSize) { size_t len; if (pWav == NULL) { return bufFixedSize; } len = drwav__strlen_clamped(str, bufFixedSize); drwav__write_or_count(pWav, str, len); if (len < bufFixedSize) { size_t i; for (i = 0; i < bufFixedSize - len; ++i) { drwav__write_byte(pWav, 0); } } return bufFixedSize; } /* pWav can be NULL meaning just count the bytes that would be written. 
*/ DRWAV_PRIVATE size_t drwav__write_or_count_metadata(drwav* pWav, drwav_metadata* pMetadatas, drwav_uint32 metadataCount) { size_t bytesWritten = 0; drwav_bool32 hasListAdtl = DRWAV_FALSE; drwav_bool32 hasListInfo = DRWAV_FALSE; drwav_uint32 iMetadata; if (pMetadatas == NULL || metadataCount == 0) { return 0; } for (iMetadata = 0; iMetadata < metadataCount; ++iMetadata) { drwav_metadata* pMetadata = &pMetadatas[iMetadata]; drwav_uint32 chunkSize = 0; if ((pMetadata->type & drwav_metadata_type_list_all_info_strings) || (pMetadata->type == drwav_metadata_type_unknown && pMetadata->data.unknown.chunkLocation == drwav_metadata_location_inside_info_list)) { hasListInfo = DRWAV_TRUE; } if ((pMetadata->type & drwav_metadata_type_list_all_adtl) || (pMetadata->type == drwav_metadata_type_unknown && pMetadata->data.unknown.chunkLocation == drwav_metadata_location_inside_adtl_list)) { hasListAdtl = DRWAV_TRUE; } switch (pMetadata->type) { case drwav_metadata_type_smpl: { drwav_uint32 iLoop; chunkSize = DRWAV_SMPL_BYTES + DRWAV_SMPL_LOOP_BYTES * pMetadata->data.smpl.sampleLoopCount + pMetadata->data.smpl.samplerSpecificDataSizeInBytes; bytesWritten += drwav__write_or_count(pWav, "smpl", 4); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, chunkSize); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.manufacturerId); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.productId); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.samplePeriodNanoseconds); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.midiUnityNote); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.midiPitchFraction); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.smpteFormat); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.smpteOffset); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, 
pMetadata->data.smpl.sampleLoopCount); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.samplerSpecificDataSizeInBytes); for (iLoop = 0; iLoop < pMetadata->data.smpl.sampleLoopCount; ++iLoop) { bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.pLoops[iLoop].cuePointId); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.pLoops[iLoop].type); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.pLoops[iLoop].firstSampleByteOffset); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.pLoops[iLoop].lastSampleByteOffset); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.pLoops[iLoop].sampleFraction); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.pLoops[iLoop].playCount); } if (pMetadata->data.smpl.samplerSpecificDataSizeInBytes > 0) { bytesWritten += drwav__write_or_count(pWav, pMetadata->data.smpl.pSamplerSpecificData, pMetadata->data.smpl.samplerSpecificDataSizeInBytes); } } break; case drwav_metadata_type_inst: { chunkSize = DRWAV_INST_BYTES; bytesWritten += drwav__write_or_count(pWav, "inst", 4); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, chunkSize); bytesWritten += drwav__write_or_count(pWav, &pMetadata->data.inst.midiUnityNote, 1); bytesWritten += drwav__write_or_count(pWav, &pMetadata->data.inst.fineTuneCents, 1); bytesWritten += drwav__write_or_count(pWav, &pMetadata->data.inst.gainDecibels, 1); bytesWritten += drwav__write_or_count(pWav, &pMetadata->data.inst.lowNote, 1); bytesWritten += drwav__write_or_count(pWav, &pMetadata->data.inst.highNote, 1); bytesWritten += drwav__write_or_count(pWav, &pMetadata->data.inst.lowVelocity, 1); bytesWritten += drwav__write_or_count(pWav, &pMetadata->data.inst.highVelocity, 1); } break; case drwav_metadata_type_cue: { drwav_uint32 iCuePoint; chunkSize = DRWAV_CUE_BYTES + DRWAV_CUE_POINT_BYTES * 
pMetadata->data.cue.cuePointCount; bytesWritten += drwav__write_or_count(pWav, "cue ", 4); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, chunkSize); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, pMetadata->data.cue.cuePointCount); for (iCuePoint = 0; iCuePoint < pMetadata->data.cue.cuePointCount; ++iCuePoint) { bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, pMetadata->data.cue.pCuePoints[iCuePoint].id); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, pMetadata->data.cue.pCuePoints[iCuePoint].playOrderPosition); bytesWritten += drwav__write_or_count(pWav, pMetadata->data.cue.pCuePoints[iCuePoint].dataChunkId, 4); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, pMetadata->data.cue.pCuePoints[iCuePoint].chunkStart); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, pMetadata->data.cue.pCuePoints[iCuePoint].blockStart); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, pMetadata->data.cue.pCuePoints[iCuePoint].sampleByteOffset); } } break; case drwav_metadata_type_acid: { chunkSize = DRWAV_ACID_BYTES; bytesWritten += drwav__write_or_count(pWav, "acid", 4); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, chunkSize); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, pMetadata->data.acid.flags); bytesWritten += drwav__write_or_count_u16ne_to_le(pWav, pMetadata->data.acid.midiUnityNote); bytesWritten += drwav__write_or_count_u16ne_to_le(pWav, pMetadata->data.acid.reserved1); bytesWritten += drwav__write_or_count_f32ne_to_le(pWav, pMetadata->data.acid.reserved2); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, pMetadata->data.acid.numBeats); bytesWritten += drwav__write_or_count_u16ne_to_le(pWav, pMetadata->data.acid.meterDenominator); bytesWritten += drwav__write_or_count_u16ne_to_le(pWav, pMetadata->data.acid.meterNumerator); bytesWritten += drwav__write_or_count_f32ne_to_le(pWav, pMetadata->data.acid.tempo); } break; case drwav_metadata_type_bext: { char reservedBuf[DRWAV_BEXT_RESERVED_BYTES]; 
drwav_uint32 timeReferenceLow; drwav_uint32 timeReferenceHigh; chunkSize = DRWAV_BEXT_BYTES + pMetadata->data.bext.codingHistorySize; bytesWritten += drwav__write_or_count(pWav, "bext", 4); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, chunkSize); bytesWritten += drwav__write_or_count_string_to_fixed_size_buf(pWav, pMetadata->data.bext.pDescription, DRWAV_BEXT_DESCRIPTION_BYTES); bytesWritten += drwav__write_or_count_string_to_fixed_size_buf(pWav, pMetadata->data.bext.pOriginatorName, DRWAV_BEXT_ORIGINATOR_NAME_BYTES); bytesWritten += drwav__write_or_count_string_to_fixed_size_buf(pWav, pMetadata->data.bext.pOriginatorReference, DRWAV_BEXT_ORIGINATOR_REF_BYTES); bytesWritten += drwav__write_or_count(pWav, pMetadata->data.bext.pOriginationDate, sizeof(pMetadata->data.bext.pOriginationDate)); bytesWritten += drwav__write_or_count(pWav, pMetadata->data.bext.pOriginationTime, sizeof(pMetadata->data.bext.pOriginationTime)); timeReferenceLow = (drwav_uint32)(pMetadata->data.bext.timeReference & 0xFFFFFFFF); timeReferenceHigh = (drwav_uint32)(pMetadata->data.bext.timeReference >> 32); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, timeReferenceLow); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, timeReferenceHigh); bytesWritten += drwav__write_or_count_u16ne_to_le(pWav, pMetadata->data.bext.version); bytesWritten += drwav__write_or_count(pWav, pMetadata->data.bext.pUMID, DRWAV_BEXT_UMID_BYTES); bytesWritten += drwav__write_or_count_u16ne_to_le(pWav, pMetadata->data.bext.loudnessValue); bytesWritten += drwav__write_or_count_u16ne_to_le(pWav, pMetadata->data.bext.loudnessRange); bytesWritten += drwav__write_or_count_u16ne_to_le(pWav, pMetadata->data.bext.maxTruePeakLevel); bytesWritten += drwav__write_or_count_u16ne_to_le(pWav, pMetadata->data.bext.maxMomentaryLoudness); bytesWritten += drwav__write_or_count_u16ne_to_le(pWav, pMetadata->data.bext.maxShortTermLoudness); DRWAV_ZERO_MEMORY(reservedBuf, sizeof(reservedBuf)); bytesWritten += 
drwav__write_or_count(pWav, reservedBuf, sizeof(reservedBuf)); if (pMetadata->data.bext.codingHistorySize > 0) { bytesWritten += drwav__write_or_count(pWav, pMetadata->data.bext.pCodingHistory, pMetadata->data.bext.codingHistorySize); } } break; case drwav_metadata_type_unknown: { if (pMetadata->data.unknown.chunkLocation == drwav_metadata_location_top_level) { chunkSize = pMetadata->data.unknown.dataSizeInBytes; bytesWritten += drwav__write_or_count(pWav, pMetadata->data.unknown.id, 4); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, chunkSize); bytesWritten += drwav__write_or_count(pWav, pMetadata->data.unknown.pData, pMetadata->data.unknown.dataSizeInBytes); } } break; default: break; } if ((chunkSize % 2) != 0) { bytesWritten += drwav__write_or_count_byte(pWav, 0); } } if (hasListInfo) { drwav_uint32 chunkSize = 4; /* Start with 4 bytes for "INFO". */ for (iMetadata = 0; iMetadata < metadataCount; ++iMetadata) { drwav_metadata* pMetadata = &pMetadatas[iMetadata]; if ((pMetadata->type & drwav_metadata_type_list_all_info_strings)) { chunkSize += 8; /* For id and string size. */ chunkSize += pMetadata->data.infoText.stringLength + 1; /* Include null terminator. */ } else if (pMetadata->type == drwav_metadata_type_unknown && pMetadata->data.unknown.chunkLocation == drwav_metadata_location_inside_info_list) { chunkSize += 8; /* For id string size. 
*/ chunkSize += pMetadata->data.unknown.dataSizeInBytes; } if ((chunkSize % 2) != 0) { chunkSize += 1; } } bytesWritten += drwav__write_or_count(pWav, "LIST", 4); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, chunkSize); bytesWritten += drwav__write_or_count(pWav, "INFO", 4); for (iMetadata = 0; iMetadata < metadataCount; ++iMetadata) { drwav_metadata* pMetadata = &pMetadatas[iMetadata]; drwav_uint32 subchunkSize = 0; if (pMetadata->type & drwav_metadata_type_list_all_info_strings) { const char* pID = NULL; switch (pMetadata->type) { case drwav_metadata_type_list_info_software: pID = "ISFT"; break; case drwav_metadata_type_list_info_copyright: pID = "ICOP"; break; case drwav_metadata_type_list_info_title: pID = "INAM"; break; case drwav_metadata_type_list_info_artist: pID = "IART"; break; case drwav_metadata_type_list_info_comment: pID = "ICMT"; break; case drwav_metadata_type_list_info_date: pID = "ICRD"; break; case drwav_metadata_type_list_info_genre: pID = "IGNR"; break; case drwav_metadata_type_list_info_album: pID = "IPRD"; break; case drwav_metadata_type_list_info_tracknumber: pID = "ITRK"; break; default: break; } DRWAV_ASSERT(pID != NULL); if (pMetadata->data.infoText.stringLength) { subchunkSize = pMetadata->data.infoText.stringLength + 1; bytesWritten += drwav__write_or_count(pWav, pID, 4); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, subchunkSize); bytesWritten += drwav__write_or_count(pWav, pMetadata->data.infoText.pString, pMetadata->data.infoText.stringLength); bytesWritten += drwav__write_or_count_byte(pWav, '\0'); } } else if (pMetadata->type == drwav_metadata_type_unknown && pMetadata->data.unknown.chunkLocation == drwav_metadata_location_inside_info_list) { if (pMetadata->data.unknown.dataSizeInBytes) { subchunkSize = pMetadata->data.unknown.dataSizeInBytes; bytesWritten += drwav__write_or_count(pWav, pMetadata->data.unknown.id, 4); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, 
pMetadata->data.unknown.dataSizeInBytes); bytesWritten += drwav__write_or_count(pWav, pMetadata->data.unknown.pData, subchunkSize); } } if ((subchunkSize % 2) != 0) { bytesWritten += drwav__write_or_count_byte(pWav, 0); } } } if (hasListAdtl) { drwav_uint32 chunkSize = 4; /* start with 4 bytes for "adtl" */ for (iMetadata = 0; iMetadata < metadataCount; ++iMetadata) { drwav_metadata* pMetadata = &pMetadatas[iMetadata]; switch (pMetadata->type) { case drwav_metadata_type_list_label: case drwav_metadata_type_list_note: { chunkSize += 8; /* for id and chunk size */ chunkSize += DRWAV_LIST_LABEL_OR_NOTE_BYTES; if (pMetadata->data.labelOrNote.stringLength > 0) { chunkSize += pMetadata->data.labelOrNote.stringLength + 1; } } break; case drwav_metadata_type_list_labelled_cue_region: { chunkSize += 8; /* for id and chunk size */ chunkSize += DRWAV_LIST_LABELLED_TEXT_BYTES; if (pMetadata->data.labelledCueRegion.stringLength > 0) { chunkSize += pMetadata->data.labelledCueRegion.stringLength + 1; } } break; case drwav_metadata_type_unknown: { if (pMetadata->data.unknown.chunkLocation == drwav_metadata_location_inside_adtl_list) { chunkSize += 8; /* for id and chunk size */ chunkSize += pMetadata->data.unknown.dataSizeInBytes; } } break; default: break; } if ((chunkSize % 2) != 0) { chunkSize += 1; } } bytesWritten += drwav__write_or_count(pWav, "LIST", 4); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, chunkSize); bytesWritten += drwav__write_or_count(pWav, "adtl", 4); for (iMetadata = 0; iMetadata < metadataCount; ++iMetadata) { drwav_metadata* pMetadata = &pMetadatas[iMetadata]; drwav_uint32 subchunkSize = 0; switch (pMetadata->type) { case drwav_metadata_type_list_label: case drwav_metadata_type_list_note: { if (pMetadata->data.labelOrNote.stringLength > 0) { const char *pID = NULL; if (pMetadata->type == drwav_metadata_type_list_label) { pID = "labl"; } else if (pMetadata->type == drwav_metadata_type_list_note) { pID = "note"; } DRWAV_ASSERT(pID != NULL); 
DRWAV_ASSERT(pMetadata->data.labelOrNote.pString != NULL); subchunkSize = DRWAV_LIST_LABEL_OR_NOTE_BYTES; bytesWritten += drwav__write_or_count(pWav, pID, 4); subchunkSize += pMetadata->data.labelOrNote.stringLength + 1; bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, subchunkSize); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, pMetadata->data.labelOrNote.cuePointId); bytesWritten += drwav__write_or_count(pWav, pMetadata->data.labelOrNote.pString, pMetadata->data.labelOrNote.stringLength); bytesWritten += drwav__write_or_count_byte(pWav, '\0'); } } break; case drwav_metadata_type_list_labelled_cue_region: { subchunkSize = DRWAV_LIST_LABELLED_TEXT_BYTES; bytesWritten += drwav__write_or_count(pWav, "ltxt", 4); if (pMetadata->data.labelledCueRegion.stringLength > 0) { subchunkSize += pMetadata->data.labelledCueRegion.stringLength + 1; } bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, subchunkSize); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, pMetadata->data.labelledCueRegion.cuePointId); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, pMetadata->data.labelledCueRegion.sampleLength); bytesWritten += drwav__write_or_count(pWav, pMetadata->data.labelledCueRegion.purposeId, 4); bytesWritten += drwav__write_or_count_u16ne_to_le(pWav, pMetadata->data.labelledCueRegion.country); bytesWritten += drwav__write_or_count_u16ne_to_le(pWav, pMetadata->data.labelledCueRegion.language); bytesWritten += drwav__write_or_count_u16ne_to_le(pWav, pMetadata->data.labelledCueRegion.dialect); bytesWritten += drwav__write_or_count_u16ne_to_le(pWav, pMetadata->data.labelledCueRegion.codePage); if (pMetadata->data.labelledCueRegion.stringLength > 0) { DRWAV_ASSERT(pMetadata->data.labelledCueRegion.pString != NULL); bytesWritten += drwav__write_or_count(pWav, pMetadata->data.labelledCueRegion.pString, pMetadata->data.labelledCueRegion.stringLength); bytesWritten += drwav__write_or_count_byte(pWav, '\0'); } } break; case drwav_metadata_type_unknown: { 
if (pMetadata->data.unknown.chunkLocation == drwav_metadata_location_inside_adtl_list) { subchunkSize = pMetadata->data.unknown.dataSizeInBytes; DRWAV_ASSERT(pMetadata->data.unknown.pData != NULL); bytesWritten += drwav__write_or_count(pWav, pMetadata->data.unknown.id, 4); bytesWritten += drwav__write_or_count_u32ne_to_le(pWav, subchunkSize); bytesWritten += drwav__write_or_count(pWav, pMetadata->data.unknown.pData, subchunkSize); } } break; default: break; } if ((subchunkSize % 2) != 0) { bytesWritten += drwav__write_or_count_byte(pWav, 0); } } } DRWAV_ASSERT((bytesWritten % 2) == 0); return bytesWritten; } DRWAV_PRIVATE drwav_uint32 drwav__riff_chunk_size_riff(drwav_uint64 dataChunkSize, drwav_metadata* pMetadata, drwav_uint32 metadataCount) { drwav_uint64 chunkSize = 4 + 24 + (drwav_uint64)drwav__write_or_count_metadata(NULL, pMetadata, metadataCount) + 8 + dataChunkSize + drwav__chunk_padding_size_riff(dataChunkSize); /* 4 = "WAVE". 24 = "fmt " chunk. 8 = "data" + u32 data size. */ if (chunkSize > 0xFFFFFFFFUL) { chunkSize = 0xFFFFFFFFUL; } return (drwav_uint32)chunkSize; /* Safe cast due to the clamp above. */ } DRWAV_PRIVATE drwav_uint32 drwav__data_chunk_size_riff(drwav_uint64 dataChunkSize) { if (dataChunkSize <= 0xFFFFFFFFUL) { return (drwav_uint32)dataChunkSize; } else { return 0xFFFFFFFFUL; } } DRWAV_PRIVATE drwav_uint64 drwav__riff_chunk_size_w64(drwav_uint64 dataChunkSize) { drwav_uint64 dataSubchunkPaddingSize = drwav__chunk_padding_size_w64(dataChunkSize); return 80 + 24 + dataChunkSize + dataSubchunkPaddingSize; /* +24 because W64 includes the size of the GUID and size fields. */ } DRWAV_PRIVATE drwav_uint64 drwav__data_chunk_size_w64(drwav_uint64 dataChunkSize) { return 24 + dataChunkSize; /* +24 because W64 includes the size of the GUID and size fields. 
*/
}

/*
Size value for the 32-bit "RIFF" size field of an RF64 file. The true 64-bit sizes live in the "ds64" chunk, so this
field is clamped to 0xFFFFFFFF when the computed size overflows 32 bits.
*/
DRWAV_PRIVATE drwav_uint64 drwav__riff_chunk_size_rf64(drwav_uint64 dataChunkSize, drwav_metadata *metadata, drwav_uint32 numMetadata)
{
    drwav_uint64 chunkSize = 4 + 36 + 24 + (drwav_uint64)drwav__write_or_count_metadata(NULL, metadata, numMetadata) + 8 + dataChunkSize + drwav__chunk_padding_size_riff(dataChunkSize); /* 4 = "WAVE". 36 = "ds64" chunk. 24 = "fmt " chunk. 8 = "data" + u32 data size. */
    if (chunkSize > 0xFFFFFFFFUL) {
        chunkSize = 0xFFFFFFFFUL;
    }

    return chunkSize;
}

/* RF64 records the true 64-bit data chunk size in the "ds64" chunk, so no adjustment is needed here. */
DRWAV_PRIVATE drwav_uint64 drwav__data_chunk_size_rf64(drwav_uint64 dataChunkSize)
{
    return dataChunkSize;
}

/*
Validates the arguments and fills out the parts of the drwav object common to all writing modes (callbacks, allocation
callbacks, and the "fmt " chunk fields derived from pFormat). Returns DRWAV_FALSE if the combination cannot be written.
drwav_init_write__internal() must be called afterwards to emit the container headers.
*/
DRWAV_PRIVATE drwav_bool32 drwav_preinit_write(drwav* pWav, const drwav_data_format* pFormat, drwav_bool32 isSequential, drwav_write_proc onWrite, drwav_seek_proc onSeek, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    if (pWav == NULL || onWrite == NULL) {
        return DRWAV_FALSE;
    }

    if (!isSequential && onSeek == NULL) {
        return DRWAV_FALSE; /* <-- onSeek is required when in non-sequential mode. */
    }

    /* Not currently supporting compressed formats. Will need to add support for the "fact" chunk before we enable this. */
    if (pFormat->format == DR_WAVE_FORMAT_EXTENSIBLE) {
        return DRWAV_FALSE;
    }
    if (pFormat->format == DR_WAVE_FORMAT_ADPCM || pFormat->format == DR_WAVE_FORMAT_DVI_ADPCM) {
        return DRWAV_FALSE;
    }

    DRWAV_ZERO_MEMORY(pWav, sizeof(*pWav));
    pWav->onWrite   = onWrite;
    pWav->onSeek    = onSeek;
    pWav->pUserData = pUserData;
    pWav->allocationCallbacks = drwav_copy_allocation_callbacks_or_defaults(pAllocationCallbacks);

    /* Writing requires onFree plus at least one of onMalloc/onRealloc. */
    if (pWav->allocationCallbacks.onFree == NULL || (pWav->allocationCallbacks.onMalloc == NULL && pWav->allocationCallbacks.onRealloc == NULL)) {
        return DRWAV_FALSE; /* Invalid allocation callbacks. */
    }

    /* The "fmt " chunk fields are derived from the requested data format. */
    pWav->fmt.formatTag = (drwav_uint16)pFormat->format;
    pWav->fmt.channels = (drwav_uint16)pFormat->channels;
    pWav->fmt.sampleRate = pFormat->sampleRate;
    pWav->fmt.avgBytesPerSec = (drwav_uint32)((pFormat->bitsPerSample * pFormat->sampleRate * pFormat->channels) / 8);
    pWav->fmt.blockAlign = (drwav_uint16)((pFormat->channels * pFormat->bitsPerSample) / 8);
    pWav->fmt.bitsPerSample = (drwav_uint16)pFormat->bitsPerSample;
    pWav->fmt.extendedSize = 0;
    pWav->isSequentialWrite = isSequential;

    return DRWAV_TRUE;
}

/*
Emits the container headers ("RIFF"/"RF64"/W64 GUIDs, "ds64", "fmt ", optional metadata, "data") based on the state set
by drwav_preinit_write(). In sequential mode the size fields are written with their final values immediately; otherwise
they start at zero and are patched in drwav_uninit() using a backwards seek.
*/
DRWAV_PRIVATE drwav_bool32 drwav_init_write__internal(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount)
{
    /* The function assumes drwav_preinit_write() was called beforehand. */
    size_t runningPos = 0;
    drwav_uint64 initialDataChunkSize = 0;
    drwav_uint64 chunkSizeFMT;

    /*
    The initial values for the "RIFF" and "data" chunks depends on whether or not we are initializing in sequential mode or not. In
    sequential mode we set this to its final values straight away since they can be calculated from the total sample count. In non-
    sequential mode we initialize it all to zero and fill it out in drwav_uninit() using a backwards seek.
    */
    if (pWav->isSequentialWrite) {
        initialDataChunkSize = (totalSampleCount * pWav->fmt.bitsPerSample) / 8;

        /*
        The RIFF container has a limit on the number of samples. drwav is not allowing this. There's no practical limits for Wave64
        so for the sake of simplicity I'm not doing any validation for that.
        */
        if (pFormat->container == drwav_container_riff) {
            if (initialDataChunkSize > (0xFFFFFFFFUL - 36)) {
                return DRWAV_FALSE; /* Not enough room to store every sample. */
            }
        }
    }

    pWav->dataChunkDataSizeTargetWrite = initialDataChunkSize;

    /* "RIFF" chunk.
*/ if (pFormat->container == drwav_container_riff) { drwav_uint32 chunkSizeRIFF = 28 + (drwav_uint32)initialDataChunkSize; /* +28 = "WAVE" + [sizeof "fmt " chunk] */ runningPos += drwav__write(pWav, "RIFF", 4); runningPos += drwav__write_u32ne_to_le(pWav, chunkSizeRIFF); runningPos += drwav__write(pWav, "WAVE", 4); } else if (pFormat->container == drwav_container_w64) { drwav_uint64 chunkSizeRIFF = 80 + 24 + initialDataChunkSize; /* +24 because W64 includes the size of the GUID and size fields. */ runningPos += drwav__write(pWav, drwavGUID_W64_RIFF, 16); runningPos += drwav__write_u64ne_to_le(pWav, chunkSizeRIFF); runningPos += drwav__write(pWav, drwavGUID_W64_WAVE, 16); } else if (pFormat->container == drwav_container_rf64) { runningPos += drwav__write(pWav, "RF64", 4); runningPos += drwav__write_u32ne_to_le(pWav, 0xFFFFFFFF); /* Always 0xFFFFFFFF for RF64. Set to a proper value in the "ds64" chunk. */ runningPos += drwav__write(pWav, "WAVE", 4); } else { return DRWAV_FALSE; /* Container not supported for writing. */ } /* "ds64" chunk (RF64 only). */ if (pFormat->container == drwav_container_rf64) { drwav_uint32 initialds64ChunkSize = 28; /* 28 = [Size of RIFF (8 bytes)] + [Size of DATA (8 bytes)] + [Sample Count (8 bytes)] + [Table Length (4 bytes)]. Table length always set to 0. */ drwav_uint64 initialRiffChunkSize = 8 + initialds64ChunkSize + initialDataChunkSize; /* +8 for the ds64 header. */ runningPos += drwav__write(pWav, "ds64", 4); runningPos += drwav__write_u32ne_to_le(pWav, initialds64ChunkSize); /* Size of ds64. */ runningPos += drwav__write_u64ne_to_le(pWav, initialRiffChunkSize); /* Size of RIFF. Set to true value at the end. */ runningPos += drwav__write_u64ne_to_le(pWav, initialDataChunkSize); /* Size of DATA. Set to true value at the end. */ runningPos += drwav__write_u64ne_to_le(pWav, totalSampleCount); /* Sample count. */ runningPos += drwav__write_u32ne_to_le(pWav, 0); /* Table length. 
Always set to zero in our case since we're not doing any other chunks than "DATA". */
    }

    /* "fmt " chunk. */
    if (pFormat->container == drwav_container_riff || pFormat->container == drwav_container_rf64) {
        chunkSizeFMT = 16;
        runningPos += drwav__write(pWav, "fmt ", 4);
        runningPos += drwav__write_u32ne_to_le(pWav, (drwav_uint32)chunkSizeFMT);
    } else if (pFormat->container == drwav_container_w64) {
        chunkSizeFMT = 40;
        runningPos += drwav__write(pWav, drwavGUID_W64_FMT, 16);
        runningPos += drwav__write_u64ne_to_le(pWav, chunkSizeFMT);
    }

    /* The fmt body: format tag, channel count, sample rate, byte rate, block align, bits per sample. */
    runningPos += drwav__write_u16ne_to_le(pWav, pWav->fmt.formatTag);
    runningPos += drwav__write_u16ne_to_le(pWav, pWav->fmt.channels);
    runningPos += drwav__write_u32ne_to_le(pWav, pWav->fmt.sampleRate);
    runningPos += drwav__write_u32ne_to_le(pWav, pWav->fmt.avgBytesPerSec);
    runningPos += drwav__write_u16ne_to_le(pWav, pWav->fmt.blockAlign);
    runningPos += drwav__write_u16ne_to_le(pWav, pWav->fmt.bitsPerSample);

    /* TODO: is a 'fact' chunk required for DR_WAVE_FORMAT_IEEE_FLOAT? */

    /* Metadata is only written up-front in non-sequential mode, and only for RIFF/RF64 containers. */
    if (!pWav->isSequentialWrite && pWav->pMetadata != NULL && pWav->metadataCount > 0 && (pFormat->container == drwav_container_riff || pFormat->container == drwav_container_rf64)) {
        runningPos += drwav__write_or_count_metadata(pWav, pWav->pMetadata, pWav->metadataCount);
    }

    pWav->dataChunkDataPos = runningPos;

    /* "data" chunk. */
    if (pFormat->container == drwav_container_riff) {
        drwav_uint32 chunkSizeDATA = (drwav_uint32)initialDataChunkSize;
        runningPos += drwav__write(pWav, "data", 4);
        runningPos += drwav__write_u32ne_to_le(pWav, chunkSizeDATA);
    } else if (pFormat->container == drwav_container_w64) {
        drwav_uint64 chunkSizeDATA = 24 + initialDataChunkSize;     /* +24 because W64 includes the size of the GUID and size fields. */
        runningPos += drwav__write(pWav, drwavGUID_W64_DATA, 16);
        runningPos += drwav__write_u64ne_to_le(pWav, chunkSizeDATA);
    } else if (pFormat->container == drwav_container_rf64) {
        runningPos += drwav__write(pWav, "data", 4);
        runningPos += drwav__write_u32ne_to_le(pWav, 0xFFFFFFFF);   /* Always set to 0xFFFFFFFF for RF64. The true size of the data chunk is specified in the ds64 chunk. */
    }

    /* Set some properties for the client's convenience. */
    pWav->container = pFormat->container;
    pWav->channels = (drwav_uint16)pFormat->channels;
    pWav->sampleRate = pFormat->sampleRate;
    pWav->bitsPerSample = (drwav_uint16)pFormat->bitsPerSample;
    pWav->translatedFormatTag = (drwav_uint16)pFormat->format;
    pWav->dataChunkDataPos = runningPos;

    return DRWAV_TRUE;
}


/* Initializes a writer in non-sequential mode; chunk sizes are patched later via the onSeek callback. */
DRWAV_API drwav_bool32 drwav_init_write(drwav* pWav, const drwav_data_format* pFormat, drwav_write_proc onWrite, drwav_seek_proc onSeek, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    if (!drwav_preinit_write(pWav, pFormat, DRWAV_FALSE, onWrite, onSeek, pUserData, pAllocationCallbacks)) {
        return DRWAV_FALSE;
    }

    return drwav_init_write__internal(pWav, pFormat, 0);                /* DRWAV_FALSE = Not Sequential */
}

/* Initializes a writer in sequential mode. The total sample count must be known up front; no onSeek callback is used. */
DRWAV_API drwav_bool32 drwav_init_write_sequential(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_write_proc onWrite, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    if (!drwav_preinit_write(pWav, pFormat, DRWAV_TRUE, onWrite, NULL, pUserData, pAllocationCallbacks)) {
        return DRWAV_FALSE;
    }

    return drwav_init_write__internal(pWav, pFormat, totalSampleCount); /* DRWAV_TRUE = Sequential */
}

/* Convenience wrapper: takes a PCM frame count rather than a raw sample count (frames * channels). */
DRWAV_API drwav_bool32 drwav_init_write_sequential_pcm_frames(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, drwav_write_proc onWrite, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    if (pFormat == NULL) {
        return DRWAV_FALSE;
    }

    return drwav_init_write_sequential(pWav, pFormat, totalPCMFrameCount*pFormat->channels, onWrite, pUserData, pAllocationCallbacks);
}

/* Same as drwav_init_write(), but attaches a caller-owned metadata array that is written out with the header. */
DRWAV_API drwav_bool32 drwav_init_write_with_metadata(drwav* pWav, const drwav_data_format* pFormat, drwav_write_proc onWrite, drwav_seek_proc onSeek, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks, drwav_metadata* pMetadata, drwav_uint32 metadataCount)
{
    if (!drwav_preinit_write(pWav, pFormat, DRWAV_FALSE, onWrite, onSeek, pUserData, pAllocationCallbacks)) {
        return DRWAV_FALSE;
    }

    pWav->pMetadata = pMetadata;
    pWav->metadataCount = metadataCount;

    return drwav_init_write__internal(pWav, pFormat, 0);
}

/* Computes the total file size in bytes that writing totalFrameCount frames (plus metadata) would produce. */
DRWAV_API drwav_uint64 drwav_target_write_size_bytes(const drwav_data_format* pFormat, drwav_uint64 totalFrameCount, drwav_metadata* pMetadata, drwav_uint32 metadataCount)
{
    /* Casting totalFrameCount to drwav_int64 for VC6 compatibility. No issues in practice because nobody is going to exhaust the whole 63 bits. */
    drwav_uint64 targetDataSizeBytes = (drwav_uint64)((drwav_int64)totalFrameCount * pFormat->channels * pFormat->bitsPerSample/8.0);
    drwav_uint64 riffChunkSizeBytes;
    drwav_uint64 fileSizeBytes = 0;

    if (pFormat->container == drwav_container_riff) {
        riffChunkSizeBytes = drwav__riff_chunk_size_riff(targetDataSizeBytes, pMetadata, metadataCount);
        fileSizeBytes = (8 + riffChunkSizeBytes);   /* +8 because WAV doesn't include the size of the ChunkID and ChunkSize fields. */
    } else if (pFormat->container == drwav_container_w64) {
        riffChunkSizeBytes = drwav__riff_chunk_size_w64(targetDataSizeBytes);
        fileSizeBytes = riffChunkSizeBytes;
    } else if (pFormat->container == drwav_container_rf64) {
        riffChunkSizeBytes = drwav__riff_chunk_size_rf64(targetDataSizeBytes, pMetadata, metadataCount);
        fileSizeBytes = (8 + riffChunkSizeBytes);   /* +8 because WAV doesn't include the size of the ChunkID and ChunkSize fields.
*/
    }

    return fileSizeBytes;
}


#ifndef DR_WAV_NO_STDIO
/* Errno */
/* drwav_result_from_errno() is only used for fopen() and wfopen() so putting it inside DR_WAV_NO_STDIO for now. If something else needs this later we can move it out. */
#include <errno.h>

/* Maps a C errno value to the closest drwav_result. Each case is #ifdef-guarded because not every errno constant exists on every platform. */
DRWAV_PRIVATE drwav_result drwav_result_from_errno(int e)
{
    switch (e)
    {
        case 0: return DRWAV_SUCCESS;
    #ifdef EPERM
        case EPERM: return DRWAV_INVALID_OPERATION;
    #endif
    #ifdef ENOENT
        case ENOENT: return DRWAV_DOES_NOT_EXIST;
    #endif
    #ifdef ESRCH
        case ESRCH: return DRWAV_DOES_NOT_EXIST;
    #endif
    #ifdef EINTR
        case EINTR: return DRWAV_INTERRUPT;
    #endif
    #ifdef EIO
        case EIO: return DRWAV_IO_ERROR;
    #endif
    #ifdef ENXIO
        case ENXIO: return DRWAV_DOES_NOT_EXIST;
    #endif
    #ifdef E2BIG
        case E2BIG: return DRWAV_INVALID_ARGS;
    #endif
    #ifdef ENOEXEC
        case ENOEXEC: return DRWAV_INVALID_FILE;
    #endif
    #ifdef EBADF
        case EBADF: return DRWAV_INVALID_FILE;
    #endif
    #ifdef ECHILD
        case ECHILD: return DRWAV_ERROR;
    #endif
    #ifdef EAGAIN
        case EAGAIN: return DRWAV_UNAVAILABLE;
    #endif
    #ifdef ENOMEM
        case ENOMEM: return DRWAV_OUT_OF_MEMORY;
    #endif
    #ifdef EACCES
        case EACCES: return DRWAV_ACCESS_DENIED;
    #endif
    #ifdef EFAULT
        case EFAULT: return DRWAV_BAD_ADDRESS;
    #endif
    #ifdef ENOTBLK
        case ENOTBLK: return DRWAV_ERROR;
    #endif
    #ifdef EBUSY
        case EBUSY: return DRWAV_BUSY;
    #endif
    #ifdef EEXIST
        case EEXIST: return DRWAV_ALREADY_EXISTS;
    #endif
    #ifdef EXDEV
        case EXDEV: return DRWAV_ERROR;
    #endif
    #ifdef ENODEV
        case ENODEV: return DRWAV_DOES_NOT_EXIST;
    #endif
    #ifdef ENOTDIR
        case ENOTDIR: return DRWAV_NOT_DIRECTORY;
    #endif
    #ifdef EISDIR
        case EISDIR: return DRWAV_IS_DIRECTORY;
    #endif
    #ifdef EINVAL
        case EINVAL: return DRWAV_INVALID_ARGS;
    #endif
    #ifdef ENFILE
        case ENFILE: return DRWAV_TOO_MANY_OPEN_FILES;
    #endif
    #ifdef EMFILE
        case EMFILE: return DRWAV_TOO_MANY_OPEN_FILES;
    #endif
    #ifdef ENOTTY
        case ENOTTY: return DRWAV_INVALID_OPERATION;
    #endif
    #ifdef ETXTBSY
        case ETXTBSY: return DRWAV_BUSY;
    #endif
    #ifdef EFBIG
        case EFBIG: return DRWAV_TOO_BIG;
    #endif
    #ifdef ENOSPC
        case ENOSPC: return DRWAV_NO_SPACE;
    #endif
    #ifdef ESPIPE
        case ESPIPE: return DRWAV_BAD_SEEK;
    #endif
    #ifdef EROFS
        case EROFS: return DRWAV_ACCESS_DENIED;
    #endif
    #ifdef EMLINK
        case EMLINK: return DRWAV_TOO_MANY_LINKS;
    #endif
    #ifdef EPIPE
        case EPIPE: return DRWAV_BAD_PIPE;
    #endif
    #ifdef EDOM
        case EDOM: return DRWAV_OUT_OF_RANGE;
    #endif
    #ifdef ERANGE
        case ERANGE: return DRWAV_OUT_OF_RANGE;
    #endif
    #ifdef EDEADLK
        case EDEADLK: return DRWAV_DEADLOCK;
    #endif
    #ifdef ENAMETOOLONG
        case ENAMETOOLONG: return DRWAV_PATH_TOO_LONG;
    #endif
    #ifdef ENOLCK
        case ENOLCK: return DRWAV_ERROR;
    #endif
    #ifdef ENOSYS
        case ENOSYS: return DRWAV_NOT_IMPLEMENTED;
    #endif
    #ifdef ENOTEMPTY
        case ENOTEMPTY: return DRWAV_DIRECTORY_NOT_EMPTY;
    #endif
    #ifdef ELOOP
        case ELOOP: return DRWAV_TOO_MANY_LINKS;
    #endif
    #ifdef ENOMSG
        case ENOMSG: return DRWAV_NO_MESSAGE;
    #endif
    #ifdef EIDRM
        case EIDRM: return DRWAV_ERROR;
    #endif
    #ifdef ECHRNG
        case ECHRNG: return DRWAV_ERROR;
    #endif
    #ifdef EL2NSYNC
        case EL2NSYNC: return DRWAV_ERROR;
    #endif
    #ifdef EL3HLT
        case EL3HLT: return DRWAV_ERROR;
    #endif
    #ifdef EL3RST
        case EL3RST: return DRWAV_ERROR;
    #endif
    #ifdef ELNRNG
        case ELNRNG: return DRWAV_OUT_OF_RANGE;
    #endif
    #ifdef EUNATCH
        case EUNATCH: return DRWAV_ERROR;
    #endif
    #ifdef ENOCSI
        case ENOCSI: return DRWAV_ERROR;
    #endif
    #ifdef EL2HLT
        case EL2HLT: return DRWAV_ERROR;
    #endif
    #ifdef EBADE
        case EBADE: return DRWAV_ERROR;
    #endif
    #ifdef EBADR
        case EBADR: return DRWAV_ERROR;
    #endif
    #ifdef EXFULL
        case EXFULL: return DRWAV_ERROR;
    #endif
    #ifdef ENOANO
        case ENOANO: return DRWAV_ERROR;
    #endif
    #ifdef EBADRQC
        case EBADRQC: return DRWAV_ERROR;
    #endif
    #ifdef EBADSLT
        case EBADSLT: return DRWAV_ERROR;
    #endif
    #ifdef EBFONT
        case EBFONT: return DRWAV_INVALID_FILE;
    #endif
    #ifdef ENOSTR
        case ENOSTR: return DRWAV_ERROR;
    #endif
    #ifdef ENODATA
        case ENODATA: return DRWAV_NO_DATA_AVAILABLE;
    #endif
    #ifdef ETIME
        case ETIME: return DRWAV_TIMEOUT;
    #endif
    #ifdef ENOSR
        case ENOSR: return DRWAV_NO_DATA_AVAILABLE;
    #endif
    #ifdef ENONET
        case ENONET: return DRWAV_NO_NETWORK;
    #endif
    #ifdef ENOPKG
        case ENOPKG: return DRWAV_ERROR;
    #endif
    #ifdef EREMOTE
        case EREMOTE: return DRWAV_ERROR;
    #endif
    #ifdef ENOLINK
        case ENOLINK: return DRWAV_ERROR;
    #endif
    #ifdef EADV
        case EADV: return DRWAV_ERROR;
    #endif
    #ifdef ESRMNT
        case ESRMNT: return DRWAV_ERROR;
    #endif
    #ifdef ECOMM
        case ECOMM: return DRWAV_ERROR;
    #endif
    #ifdef EPROTO
        case EPROTO: return DRWAV_ERROR;
    #endif
    #ifdef EMULTIHOP
        case EMULTIHOP: return DRWAV_ERROR;
    #endif
    #ifdef EDOTDOT
        case EDOTDOT: return DRWAV_ERROR;
    #endif
    #ifdef EBADMSG
        case EBADMSG: return DRWAV_BAD_MESSAGE;
    #endif
    #ifdef EOVERFLOW
        case EOVERFLOW: return DRWAV_TOO_BIG;
    #endif
    #ifdef ENOTUNIQ
        case ENOTUNIQ: return DRWAV_NOT_UNIQUE;
    #endif
    #ifdef EBADFD
        case EBADFD: return DRWAV_ERROR;
    #endif
    #ifdef EREMCHG
        case EREMCHG: return DRWAV_ERROR;
    #endif
    #ifdef ELIBACC
        case ELIBACC: return DRWAV_ACCESS_DENIED;
    #endif
    #ifdef ELIBBAD
        case ELIBBAD: return DRWAV_INVALID_FILE;
    #endif
    #ifdef ELIBSCN
        case ELIBSCN: return DRWAV_INVALID_FILE;
    #endif
    #ifdef ELIBMAX
        case ELIBMAX: return DRWAV_ERROR;
    #endif
    #ifdef ELIBEXEC
        case ELIBEXEC: return DRWAV_ERROR;
    #endif
    #ifdef EILSEQ
        case EILSEQ: return DRWAV_INVALID_DATA;
    #endif
    #ifdef ERESTART
        case ERESTART: return DRWAV_ERROR;
    #endif
    #ifdef ESTRPIPE
        case ESTRPIPE: return DRWAV_ERROR;
    #endif
    #ifdef EUSERS
        case EUSERS: return DRWAV_ERROR;
    #endif
    #ifdef ENOTSOCK
        case ENOTSOCK: return DRWAV_NOT_SOCKET;
    #endif
    #ifdef EDESTADDRREQ
        case EDESTADDRREQ: return DRWAV_NO_ADDRESS;
    #endif
    #ifdef EMSGSIZE
        case EMSGSIZE: return DRWAV_TOO_BIG;
    #endif
    #ifdef EPROTOTYPE
        case EPROTOTYPE: return DRWAV_BAD_PROTOCOL;
    #endif
    #ifdef ENOPROTOOPT
        case ENOPROTOOPT: return DRWAV_PROTOCOL_UNAVAILABLE;
    #endif
    #ifdef EPROTONOSUPPORT
        case EPROTONOSUPPORT: return DRWAV_PROTOCOL_NOT_SUPPORTED;
    #endif
    #ifdef ESOCKTNOSUPPORT
        case ESOCKTNOSUPPORT: return DRWAV_SOCKET_NOT_SUPPORTED;
    #endif
    #ifdef EOPNOTSUPP
        case EOPNOTSUPP: return DRWAV_INVALID_OPERATION;
    #endif
    #ifdef EPFNOSUPPORT
        case EPFNOSUPPORT: return DRWAV_PROTOCOL_FAMILY_NOT_SUPPORTED;
    #endif
    #ifdef EAFNOSUPPORT
        case EAFNOSUPPORT: return DRWAV_ADDRESS_FAMILY_NOT_SUPPORTED;
    #endif
    #ifdef EADDRINUSE
        case EADDRINUSE: return DRWAV_ALREADY_IN_USE;
    #endif
    #ifdef EADDRNOTAVAIL
        case EADDRNOTAVAIL: return DRWAV_ERROR;
    #endif
    #ifdef ENETDOWN
        case ENETDOWN: return DRWAV_NO_NETWORK;
    #endif
    #ifdef ENETUNREACH
        case ENETUNREACH: return DRWAV_NO_NETWORK;
    #endif
    #ifdef ENETRESET
        case ENETRESET: return DRWAV_NO_NETWORK;
    #endif
    #ifdef ECONNABORTED
        case ECONNABORTED: return DRWAV_NO_NETWORK;
    #endif
    #ifdef ECONNRESET
        case ECONNRESET: return DRWAV_CONNECTION_RESET;
    #endif
    #ifdef ENOBUFS
        case ENOBUFS: return DRWAV_NO_SPACE;
    #endif
    #ifdef EISCONN
        case EISCONN: return DRWAV_ALREADY_CONNECTED;
    #endif
    #ifdef ENOTCONN
        case ENOTCONN: return DRWAV_NOT_CONNECTED;
    #endif
    #ifdef ESHUTDOWN
        case ESHUTDOWN: return DRWAV_ERROR;
    #endif
    #ifdef ETOOMANYREFS
        case ETOOMANYREFS: return DRWAV_ERROR;
    #endif
    #ifdef ETIMEDOUT
        case ETIMEDOUT: return DRWAV_TIMEOUT;
    #endif
    #ifdef ECONNREFUSED
        case ECONNREFUSED: return DRWAV_CONNECTION_REFUSED;
    #endif
    #ifdef EHOSTDOWN
        case EHOSTDOWN: return DRWAV_NO_HOST;
    #endif
    #ifdef EHOSTUNREACH
        case EHOSTUNREACH: return DRWAV_NO_HOST;
    #endif
    #ifdef EALREADY
        case EALREADY: return DRWAV_IN_PROGRESS;
    #endif
    #ifdef EINPROGRESS
        case EINPROGRESS: return DRWAV_IN_PROGRESS;
    #endif
    #ifdef ESTALE
        case ESTALE: return DRWAV_INVALID_FILE;
    #endif
    #ifdef EUCLEAN
        case EUCLEAN: return DRWAV_ERROR;
    #endif
    #ifdef ENOTNAM
        case ENOTNAM: return DRWAV_ERROR;
    #endif
    #ifdef ENAVAIL
        case ENAVAIL: return DRWAV_ERROR;
    #endif
    #ifdef EISNAM
        case EISNAM: return DRWAV_ERROR;
    #endif
    #ifdef EREMOTEIO
        case EREMOTEIO: return DRWAV_IO_ERROR;
    #endif
    #ifdef EDQUOT
        case EDQUOT: return DRWAV_NO_SPACE;
    #endif
    #ifdef ENOMEDIUM
        case ENOMEDIUM: return DRWAV_DOES_NOT_EXIST;
    #endif
    #ifdef EMEDIUMTYPE
        case EMEDIUMTYPE: return DRWAV_ERROR;
    #endif
    #ifdef ECANCELED
        case ECANCELED: return DRWAV_CANCELLED;
    #endif
    #ifdef ENOKEY
        case ENOKEY: return DRWAV_ERROR;
    #endif
    #ifdef EKEYEXPIRED
        case EKEYEXPIRED: return DRWAV_ERROR;
    #endif
    #ifdef EKEYREVOKED
        case EKEYREVOKED: return DRWAV_ERROR;
    #endif
    #ifdef EKEYREJECTED
        case EKEYREJECTED: return DRWAV_ERROR;
    #endif
    #ifdef EOWNERDEAD
        case EOWNERDEAD: return DRWAV_ERROR;
    #endif
    #ifdef ENOTRECOVERABLE
        case ENOTRECOVERABLE: return DRWAV_ERROR;
    #endif
    #ifdef ERFKILL
        case ERFKILL: return DRWAV_ERROR;
    #endif
    #ifdef EHWPOISON
        case EHWPOISON: return DRWAV_ERROR;
    #endif
        default: return DRWAV_ERROR;
    }
}
/* End Errno */

/* fopen */
/* Portable fopen() wrapper. Uses fopen_s() on MSVC >= 2005, fopen64() where large-file support is requested, and plain fopen() otherwise. */
DRWAV_PRIVATE drwav_result drwav_fopen(FILE** ppFile, const char* pFilePath, const char* pOpenMode)
{
#if defined(_MSC_VER) && _MSC_VER >= 1400
    errno_t err;
#endif

    if (ppFile != NULL) {
        *ppFile = NULL;  /* Safety. */
    }

    if (pFilePath == NULL || pOpenMode == NULL || ppFile == NULL) {
        return DRWAV_INVALID_ARGS;
    }

#if defined(_MSC_VER) && _MSC_VER >= 1400
    err = fopen_s(ppFile, pFilePath, pOpenMode);
    if (err != 0) {
        return drwav_result_from_errno(err);
    }
#else
#if defined(_WIN32) || defined(__APPLE__)
    *ppFile = fopen(pFilePath, pOpenMode);
#else
    #if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS == 64 && defined(_LARGEFILE64_SOURCE)
        *ppFile = fopen64(pFilePath, pOpenMode);
    #else
        *ppFile = fopen(pFilePath, pOpenMode);
    #endif
#endif
    if (*ppFile == NULL) {
        drwav_result result = drwav_result_from_errno(errno);
        if (result == DRWAV_SUCCESS) {
            result = DRWAV_ERROR;   /* Just a safety check to make sure we never ever return success when pFile == NULL. */
        }

        return result;
    }
#endif

    return DRWAV_SUCCESS;
}

/*
_wfopen() isn't always available in all compilation environments.

    * Windows only.
    * MSVC seems to support it universally as far back as VC6 from what I can tell (haven't checked further back).
    * MinGW-64 (both 32- and 64-bit) seems to support it.
    * MinGW wraps it in !defined(__STRICT_ANSI__).
    * OpenWatcom wraps it in !defined(_NO_EXT_KEYS).

This can be reviewed as compatibility issues arise.
The preference is to use _wfopen_s() and _wfopen() as opposed to the wcsrtombs() fallback, so if you notice your compiler not detecting this properly I'm happy to look at adding support.
*/
#if defined(_WIN32)
    #if defined(_MSC_VER) || defined(__MINGW64__) || (!defined(__STRICT_ANSI__) && !defined(_NO_EXT_KEYS))
        #define DRWAV_HAS_WFOPEN
    #endif
#endif

#ifndef DR_WAV_NO_WCHAR
/* Wide-character fopen() wrapper. Uses _wfopen()/_wfopen_s() where available, otherwise converts the path with wcsrtombs() and calls fopen(). */
DRWAV_PRIVATE drwav_result drwav_wfopen(FILE** ppFile, const wchar_t* pFilePath, const wchar_t* pOpenMode, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    if (ppFile != NULL) {
        *ppFile = NULL;  /* Safety. */
    }

    if (pFilePath == NULL || pOpenMode == NULL || ppFile == NULL) {
        return DRWAV_INVALID_ARGS;
    }

#if defined(DRWAV_HAS_WFOPEN)
    {
        /* Use _wfopen() on Windows. */
    #if defined(_MSC_VER) && _MSC_VER >= 1400
        errno_t err = _wfopen_s(ppFile, pFilePath, pOpenMode);
        if (err != 0) {
            return drwav_result_from_errno(err);
        }
    #else
        *ppFile = _wfopen(pFilePath, pOpenMode);
        if (*ppFile == NULL) {
            return drwav_result_from_errno(errno);
        }
    #endif
        (void)pAllocationCallbacks;
    }
#else
    /*
    Use fopen() on anything other than Windows. Requires a conversion. This is annoying because fopen() is locale specific. The only real way I can
    think of to do this is with wcsrtombs(). Note that wcstombs() is apparently not thread-safe because it uses a static global mbstate_t object for
    maintaining state. I've checked this with -std=c89 and it works, but if somebody get's a compiler error I'll look into improving compatibility.
    */

    /*
    Some compilers don't support wchar_t or wcsrtombs() which we're using below. In this case we just need to abort with an error. If you encounter
    a compiler lacking such support, add it to this list and submit a bug report and it'll be added to the library upstream.
    */
    #if defined(__DJGPP__)
    {
        /* Nothing to do here. This will fall through to the error check below. */
    }
    #else
    {
        mbstate_t mbs;
        size_t lenMB;
        const wchar_t* pFilePathTemp = pFilePath;
        char* pFilePathMB = NULL;
        char pOpenModeMB[32] = {0};

        /* Get the length first. */
        DRWAV_ZERO_OBJECT(&mbs);
        lenMB = wcsrtombs(NULL, &pFilePathTemp, 0, &mbs);
        if (lenMB == (size_t)-1) {
            return drwav_result_from_errno(errno);
        }

        pFilePathMB = (char*)drwav__malloc_from_callbacks(lenMB + 1, pAllocationCallbacks);
        if (pFilePathMB == NULL) {
            return DRWAV_OUT_OF_MEMORY;
        }

        pFilePathTemp = pFilePath;
        DRWAV_ZERO_OBJECT(&mbs);
        wcsrtombs(pFilePathMB, &pFilePathTemp, lenMB + 1, &mbs);

        /* The open mode should always consist of ASCII characters so we should be able to do a trivial conversion. */
        {
            size_t i = 0;
            for (;;) {
                if (pOpenMode[i] == 0) {
                    pOpenModeMB[i] = '\0';
                    break;
                }

                pOpenModeMB[i] = (char)pOpenMode[i];
                i += 1;
            }
        }

        *ppFile = fopen(pFilePathMB, pOpenModeMB);

        drwav__free_from_callbacks(pFilePathMB, pAllocationCallbacks);
    }
    #endif

    if (*ppFile == NULL) {
        return DRWAV_ERROR;
    }
#endif

    return DRWAV_SUCCESS;
}
#endif
/* End fopen */


/* stdio read/write/seek callbacks. pUserData is the FILE* handle. */
DRWAV_PRIVATE size_t drwav__on_read_stdio(void* pUserData, void* pBufferOut, size_t bytesToRead)
{
    return fread(pBufferOut, 1, bytesToRead, (FILE*)pUserData);
}

DRWAV_PRIVATE size_t drwav__on_write_stdio(void* pUserData, const void* pData, size_t bytesToWrite)
{
    return fwrite(pData, 1, bytesToWrite, (FILE*)pUserData);
}

DRWAV_PRIVATE drwav_bool32 drwav__on_seek_stdio(void* pUserData, int offset, drwav_seek_origin origin)
{
    return fseek((FILE*)pUserData, offset, (origin == drwav_seek_origin_current) ? SEEK_CUR : SEEK_SET) == 0;
}

DRWAV_API drwav_bool32 drwav_init_file(drwav* pWav, const char* filename, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    return drwav_init_file_ex(pWav, filename, NULL, NULL, 0, pAllocationCallbacks);
}

/* Shared init path for all file-based readers. Takes ownership of pFile and closes it on failure. */
DRWAV_PRIVATE drwav_bool32 drwav_init_file__internal_FILE(drwav* pWav, FILE* pFile, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    drwav_bool32 result;

    result = drwav_preinit(pWav, drwav__on_read_stdio, drwav__on_seek_stdio, (void*)pFile, pAllocationCallbacks);
    if (result != DRWAV_TRUE) {
        fclose(pFile);
        return result;
    }

    result = drwav_init__internal(pWav, onChunk, pChunkUserData, flags);
    if (result != DRWAV_TRUE) {
        fclose(pFile);
        return result;
    }

    return DRWAV_TRUE;
}

DRWAV_API drwav_bool32 drwav_init_file_ex(drwav* pWav, const char* filename, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    FILE* pFile;
    if (drwav_fopen(&pFile, filename, "rb") != DRWAV_SUCCESS) {
        return DRWAV_FALSE;
    }

    /* This takes ownership of the FILE* object. */
    return drwav_init_file__internal_FILE(pWav, pFile, onChunk, pChunkUserData, flags, pAllocationCallbacks);
}

#ifndef DR_WAV_NO_WCHAR
DRWAV_API drwav_bool32 drwav_init_file_w(drwav* pWav, const wchar_t* filename, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    return drwav_init_file_ex_w(pWav, filename, NULL, NULL, 0, pAllocationCallbacks);
}

DRWAV_API drwav_bool32 drwav_init_file_ex_w(drwav* pWav, const wchar_t* filename, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    FILE* pFile;
    if (drwav_wfopen(&pFile, filename, L"rb", pAllocationCallbacks) != DRWAV_SUCCESS) {
        return DRWAV_FALSE;
    }

    /* This takes ownership of the FILE* object.
*/
    return drwav_init_file__internal_FILE(pWav, pFile, onChunk, pChunkUserData, flags, pAllocationCallbacks);
}
#endif

/* Same as drwav_init_file_ex() but forces metadata parsing via the DRWAV_WITH_METADATA flag. */
DRWAV_API drwav_bool32 drwav_init_file_with_metadata(drwav* pWav, const char* filename, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    FILE* pFile;
    if (drwav_fopen(&pFile, filename, "rb") != DRWAV_SUCCESS) {
        return DRWAV_FALSE;
    }

    /* This takes ownership of the FILE* object. */
    return drwav_init_file__internal_FILE(pWav, pFile, NULL, NULL, flags | DRWAV_WITH_METADATA, pAllocationCallbacks);
}

#ifndef DR_WAV_NO_WCHAR
DRWAV_API drwav_bool32 drwav_init_file_with_metadata_w(drwav* pWav, const wchar_t* filename, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    FILE* pFile;
    if (drwav_wfopen(&pFile, filename, L"rb", pAllocationCallbacks) != DRWAV_SUCCESS) {
        return DRWAV_FALSE;
    }

    /* This takes ownership of the FILE* object. */
    return drwav_init_file__internal_FILE(pWav, pFile, NULL, NULL, flags | DRWAV_WITH_METADATA, pAllocationCallbacks);
}
#endif

/* Shared init path for all file-based writers. Takes ownership of pFile and closes it on failure. */
DRWAV_PRIVATE drwav_bool32 drwav_init_file_write__internal_FILE(drwav* pWav, FILE* pFile, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_bool32 isSequential, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    drwav_bool32 result;

    result = drwav_preinit_write(pWav, pFormat, isSequential, drwav__on_write_stdio, drwav__on_seek_stdio, (void*)pFile, pAllocationCallbacks);
    if (result != DRWAV_TRUE) {
        fclose(pFile);
        return result;
    }

    result = drwav_init_write__internal(pWav, pFormat, totalSampleCount);
    if (result != DRWAV_TRUE) {
        fclose(pFile);
        return result;
    }

    return DRWAV_TRUE;
}

DRWAV_PRIVATE drwav_bool32 drwav_init_file_write__internal(drwav* pWav, const char* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_bool32 isSequential, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    FILE* pFile;
    if (drwav_fopen(&pFile, filename, "wb") != DRWAV_SUCCESS) {
        return DRWAV_FALSE;
    }

    /* This takes ownership of the FILE* object. */
    return drwav_init_file_write__internal_FILE(pWav, pFile, pFormat, totalSampleCount, isSequential, pAllocationCallbacks);
}

#ifndef DR_WAV_NO_WCHAR
DRWAV_PRIVATE drwav_bool32 drwav_init_file_write_w__internal(drwav* pWav, const wchar_t* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_bool32 isSequential, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    FILE* pFile;
    if (drwav_wfopen(&pFile, filename, L"wb", pAllocationCallbacks) != DRWAV_SUCCESS) {
        return DRWAV_FALSE;
    }

    /* This takes ownership of the FILE* object. */
    return drwav_init_file_write__internal_FILE(pWav, pFile, pFormat, totalSampleCount, isSequential, pAllocationCallbacks);
}
#endif

DRWAV_API drwav_bool32 drwav_init_file_write(drwav* pWav, const char* filename, const drwav_data_format* pFormat, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    return drwav_init_file_write__internal(pWav, filename, pFormat, 0, DRWAV_FALSE, pAllocationCallbacks);
}

DRWAV_API drwav_bool32 drwav_init_file_write_sequential(drwav* pWav, const char* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    return drwav_init_file_write__internal(pWav, filename, pFormat, totalSampleCount, DRWAV_TRUE, pAllocationCallbacks);
}

/* Convenience wrapper: takes a PCM frame count rather than a raw sample count (frames * channels). */
DRWAV_API drwav_bool32 drwav_init_file_write_sequential_pcm_frames(drwav* pWav, const char* filename, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    if (pFormat == NULL) {
        return DRWAV_FALSE;
    }

    return drwav_init_file_write_sequential(pWav, filename, pFormat, totalPCMFrameCount*pFormat->channels, pAllocationCallbacks);
}

#ifndef DR_WAV_NO_WCHAR
DRWAV_API drwav_bool32 drwav_init_file_write_w(drwav* pWav, const wchar_t* filename, const drwav_data_format* pFormat, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    return drwav_init_file_write_w__internal(pWav, filename, pFormat, 0, DRWAV_FALSE, pAllocationCallbacks);
}

DRWAV_API drwav_bool32 drwav_init_file_write_sequential_w(drwav* pWav, const wchar_t* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    return drwav_init_file_write_w__internal(pWav, filename, pFormat, totalSampleCount, DRWAV_TRUE, pAllocationCallbacks);
}

DRWAV_API drwav_bool32 drwav_init_file_write_sequential_pcm_frames_w(drwav* pWav, const wchar_t* filename, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    if (pFormat == NULL) {
        return DRWAV_FALSE;
    }

    return drwav_init_file_write_sequential_w(pWav, filename, pFormat, totalPCMFrameCount*pFormat->channels, pAllocationCallbacks);
}
#endif
#endif  /* DR_WAV_NO_STDIO */


/* Memory-stream read callback. pUserData is the drwav object itself; reads are clamped to the remaining bytes. */
DRWAV_PRIVATE size_t drwav__on_read_memory(void* pUserData, void* pBufferOut, size_t bytesToRead)
{
    drwav* pWav = (drwav*)pUserData;
    size_t bytesRemaining;

    DRWAV_ASSERT(pWav != NULL);
    DRWAV_ASSERT(pWav->memoryStream.dataSize >= pWav->memoryStream.currentReadPos);

    bytesRemaining = pWav->memoryStream.dataSize - pWav->memoryStream.currentReadPos;
    if (bytesToRead > bytesRemaining) {
        bytesToRead = bytesRemaining;
    }

    if (bytesToRead > 0) {
        DRWAV_COPY_MEMORY(pBufferOut, pWav->memoryStream.data + pWav->memoryStream.currentReadPos, bytesToRead);
        pWav->memoryStream.currentReadPos += bytesToRead;
    }

    return bytesToRead;
}

/* Memory-stream seek callback. Fails (returns DRWAV_FALSE) on any out-of-bounds seek. */
DRWAV_PRIVATE drwav_bool32 drwav__on_seek_memory(void* pUserData, int offset, drwav_seek_origin origin)
{
    drwav* pWav = (drwav*)pUserData;
    DRWAV_ASSERT(pWav != NULL);

    if (origin == drwav_seek_origin_current) {
        if (offset > 0) {
            if (pWav->memoryStream.currentReadPos + offset > pWav->memoryStream.dataSize) {
                return DRWAV_FALSE; /* Trying to seek too far forward. */
            }
        } else {
            if (pWav->memoryStream.currentReadPos < (size_t)-offset) {
                return DRWAV_FALSE; /* Trying to seek too far backwards.
*/
            }
        }

        /* This will never underflow thanks to the clamps above. */
        pWav->memoryStream.currentReadPos += offset;
    } else {
        if ((drwav_uint32)offset <= pWav->memoryStream.dataSize) {
            pWav->memoryStream.currentReadPos = offset;
        } else {
            return DRWAV_FALSE; /* Trying to seek too far forward. */
        }
    }

    return DRWAV_TRUE;
}

/* Memory-stream write callback. Grows the caller-provided buffer via realloc (doubling, minimum 256) as needed. */
DRWAV_PRIVATE size_t drwav__on_write_memory(void* pUserData, const void* pDataIn, size_t bytesToWrite)
{
    drwav* pWav = (drwav*)pUserData;
    size_t bytesRemaining;

    DRWAV_ASSERT(pWav != NULL);
    DRWAV_ASSERT(pWav->memoryStreamWrite.dataCapacity >= pWav->memoryStreamWrite.currentWritePos);

    bytesRemaining = pWav->memoryStreamWrite.dataCapacity - pWav->memoryStreamWrite.currentWritePos;
    if (bytesRemaining < bytesToWrite) {
        /* Need to reallocate. */
        void* pNewData;
        size_t newDataCapacity = (pWav->memoryStreamWrite.dataCapacity == 0) ? 256 : pWav->memoryStreamWrite.dataCapacity * 2;

        /* If doubling wasn't enough, just make it the minimum required size to write the data. */
        if ((newDataCapacity - pWav->memoryStreamWrite.currentWritePos) < bytesToWrite) {
            newDataCapacity = pWav->memoryStreamWrite.currentWritePos + bytesToWrite;
        }

        pNewData = drwav__realloc_from_callbacks(*pWav->memoryStreamWrite.ppData, newDataCapacity, pWav->memoryStreamWrite.dataCapacity, &pWav->allocationCallbacks);
        if (pNewData == NULL) {
            return 0;
        }

        *pWav->memoryStreamWrite.ppData = pNewData;
        pWav->memoryStreamWrite.dataCapacity = newDataCapacity;
    }

    DRWAV_COPY_MEMORY(((drwav_uint8*)(*pWav->memoryStreamWrite.ppData)) + pWav->memoryStreamWrite.currentWritePos, pDataIn, bytesToWrite);

    pWav->memoryStreamWrite.currentWritePos += bytesToWrite;
    if (pWav->memoryStreamWrite.dataSize < pWav->memoryStreamWrite.currentWritePos) {
        pWav->memoryStreamWrite.dataSize = pWav->memoryStreamWrite.currentWritePos;
    }

    *pWav->memoryStreamWrite.pDataSize = pWav->memoryStreamWrite.dataSize;

    return bytesToWrite;
}

/* Memory-stream write-mode seek callback. Unlike the read-mode version, out-of-bounds seeks are clamped rather than failed. */
DRWAV_PRIVATE drwav_bool32 drwav__on_seek_memory_write(void* pUserData, int offset, drwav_seek_origin origin)
{
    drwav* pWav = (drwav*)pUserData;
    DRWAV_ASSERT(pWav != NULL);

    if (origin == drwav_seek_origin_current) {
        if (offset > 0) {
            if (pWav->memoryStreamWrite.currentWritePos + offset > pWav->memoryStreamWrite.dataSize) {
                offset = (int)(pWav->memoryStreamWrite.dataSize - pWav->memoryStreamWrite.currentWritePos);  /* Trying to seek too far forward. */
            }
        } else {
            if (pWav->memoryStreamWrite.currentWritePos < (size_t)-offset) {
                offset = -(int)pWav->memoryStreamWrite.currentWritePos;  /* Trying to seek too far backwards. */
            }
        }

        /* This will never underflow thanks to the clamps above. */
        pWav->memoryStreamWrite.currentWritePos += offset;
    } else {
        if ((drwav_uint32)offset <= pWav->memoryStreamWrite.dataSize) {
            pWav->memoryStreamWrite.currentWritePos = offset;
        } else {
            pWav->memoryStreamWrite.currentWritePos = pWav->memoryStreamWrite.dataSize;  /* Trying to seek too far forward. */
        }
    }

    return DRWAV_TRUE;
}

DRWAV_API drwav_bool32 drwav_init_memory(drwav* pWav, const void* data, size_t dataSize, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    return drwav_init_memory_ex(pWav, data, dataSize, NULL, NULL, 0, pAllocationCallbacks);
}

/* Initializes a reader over a caller-owned memory buffer. The buffer is not copied and must outlive the drwav object. */
DRWAV_API drwav_bool32 drwav_init_memory_ex(drwav* pWav, const void* data, size_t dataSize, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    if (data == NULL || dataSize == 0) {
        return DRWAV_FALSE;
    }

    if (!drwav_preinit(pWav, drwav__on_read_memory, drwav__on_seek_memory, pWav, pAllocationCallbacks)) {
        return DRWAV_FALSE;
    }

    pWav->memoryStream.data = (const drwav_uint8*)data;
    pWav->memoryStream.dataSize = dataSize;
    pWav->memoryStream.currentReadPos = 0;

    return drwav_init__internal(pWav, onChunk, pChunkUserData, flags);
}

/* Same as drwav_init_memory_ex() but forces metadata parsing via the DRWAV_WITH_METADATA flag. */
DRWAV_API drwav_bool32 drwav_init_memory_with_metadata(drwav* pWav, const void* data, size_t dataSize, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    if (data == NULL || dataSize == 0) {
        return DRWAV_FALSE;
    }

    if (!drwav_preinit(pWav, drwav__on_read_memory, drwav__on_seek_memory, pWav, pAllocationCallbacks)) {
        return DRWAV_FALSE;
    }

    pWav->memoryStream.data = (const drwav_uint8*)data;
    pWav->memoryStream.dataSize = dataSize;
    pWav->memoryStream.currentReadPos = 0;

    return drwav_init__internal(pWav, NULL, NULL, flags | DRWAV_WITH_METADATA);
}

/* Shared init path for writing to a growable in-memory buffer owned by the caller via ppData/pDataSize. */
DRWAV_PRIVATE drwav_bool32 drwav_init_memory_write__internal(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_bool32 isSequential, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    if (ppData == NULL || pDataSize == NULL) {
        return DRWAV_FALSE;
    }

    *ppData = NULL; /* Important because we're using realloc()! */
    *pDataSize = 0;

    if (!drwav_preinit_write(pWav, pFormat, isSequential, drwav__on_write_memory, drwav__on_seek_memory_write, pWav, pAllocationCallbacks)) {
        return DRWAV_FALSE;
    }

    pWav->memoryStreamWrite.ppData = ppData;
    pWav->memoryStreamWrite.pDataSize = pDataSize;
    pWav->memoryStreamWrite.dataSize = 0;
    pWav->memoryStreamWrite.dataCapacity = 0;
    pWav->memoryStreamWrite.currentWritePos = 0;

    return drwav_init_write__internal(pWav, pFormat, totalSampleCount);
}

DRWAV_API drwav_bool32 drwav_init_memory_write(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    return drwav_init_memory_write__internal(pWav, ppData, pDataSize, pFormat, 0, DRWAV_FALSE, pAllocationCallbacks);
}

DRWAV_API drwav_bool32 drwav_init_memory_write_sequential(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, const drwav_allocation_callbacks* pAllocationCallbacks)
{
    return drwav_init_memory_write__internal(pWav, ppData, pDataSize, pFormat, totalSampleCount, DRWAV_TRUE, pAllocationCallbacks);
}

/* Convenience wrapper: takes a PCM frame count rather than a raw sample count (frames * channels). */
DRWAV_API drwav_bool32 drwav_init_memory_write_sequential_pcm_frames(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, drwav_uint64
totalPCMFrameCount, const drwav_allocation_callbacks* pAllocationCallbacks) { if (pFormat == NULL) { return DRWAV_FALSE; } return drwav_init_memory_write_sequential(pWav, ppData, pDataSize, pFormat, totalPCMFrameCount*pFormat->channels, pAllocationCallbacks); } DRWAV_API drwav_result drwav_uninit(drwav* pWav) { drwav_result result = DRWAV_SUCCESS; if (pWav == NULL) { return DRWAV_INVALID_ARGS; } /* If the drwav object was opened in write mode we'll need to finalize a few things: - Make sure the "data" chunk is aligned to 16-bits for RIFF containers, or 64 bits for W64 containers. - Set the size of the "data" chunk. */ if (pWav->onWrite != NULL) { drwav_uint32 paddingSize = 0; /* Padding. Do not adjust pWav->dataChunkDataSize - this should not include the padding. */ if (pWav->container == drwav_container_riff || pWav->container == drwav_container_rf64) { paddingSize = drwav__chunk_padding_size_riff(pWav->dataChunkDataSize); } else { paddingSize = drwav__chunk_padding_size_w64(pWav->dataChunkDataSize); } if (paddingSize > 0) { drwav_uint64 paddingData = 0; drwav__write(pWav, &paddingData, paddingSize); /* Byte order does not matter for this. */ } /* Chunk sizes. When using sequential mode, these will have been filled in at initialization time. We only need to do this when using non-sequential mode. */ if (pWav->onSeek && !pWav->isSequentialWrite) { if (pWav->container == drwav_container_riff) { /* The "RIFF" chunk size. */ if (pWav->onSeek(pWav->pUserData, 4, drwav_seek_origin_start)) { drwav_uint32 riffChunkSize = drwav__riff_chunk_size_riff(pWav->dataChunkDataSize, pWav->pMetadata, pWav->metadataCount); drwav__write_u32ne_to_le(pWav, riffChunkSize); } /* The "data" chunk size. 
*/ if (pWav->onSeek(pWav->pUserData, (int)pWav->dataChunkDataPos - 4, drwav_seek_origin_start)) { drwav_uint32 dataChunkSize = drwav__data_chunk_size_riff(pWav->dataChunkDataSize); drwav__write_u32ne_to_le(pWav, dataChunkSize); } } else if (pWav->container == drwav_container_w64) { /* The "RIFF" chunk size. */ if (pWav->onSeek(pWav->pUserData, 16, drwav_seek_origin_start)) { drwav_uint64 riffChunkSize = drwav__riff_chunk_size_w64(pWav->dataChunkDataSize); drwav__write_u64ne_to_le(pWav, riffChunkSize); } /* The "data" chunk size. */ if (pWav->onSeek(pWav->pUserData, (int)pWav->dataChunkDataPos - 8, drwav_seek_origin_start)) { drwav_uint64 dataChunkSize = drwav__data_chunk_size_w64(pWav->dataChunkDataSize); drwav__write_u64ne_to_le(pWav, dataChunkSize); } } else if (pWav->container == drwav_container_rf64) { /* We only need to update the ds64 chunk. The "RIFF" and "data" chunks always have their sizes set to 0xFFFFFFFF for RF64. */ int ds64BodyPos = 12 + 8; /* The "RIFF" chunk size. */ if (pWav->onSeek(pWav->pUserData, ds64BodyPos + 0, drwav_seek_origin_start)) { drwav_uint64 riffChunkSize = drwav__riff_chunk_size_rf64(pWav->dataChunkDataSize, pWav->pMetadata, pWav->metadataCount); drwav__write_u64ne_to_le(pWav, riffChunkSize); } /* The "data" chunk size. */ if (pWav->onSeek(pWav->pUserData, ds64BodyPos + 8, drwav_seek_origin_start)) { drwav_uint64 dataChunkSize = drwav__data_chunk_size_rf64(pWav->dataChunkDataSize); drwav__write_u64ne_to_le(pWav, dataChunkSize); } } } /* Validation for sequential mode. */ if (pWav->isSequentialWrite) { if (pWav->dataChunkDataSize != pWav->dataChunkDataSizeTargetWrite) { result = DRWAV_INVALID_FILE; } } } else { drwav_free(pWav->pMetadata, &pWav->allocationCallbacks); } #ifndef DR_WAV_NO_STDIO /* If we opened the file with drwav_open_file() we will want to close the file handle. We can know whether or not drwav_open_file() was used by looking at the onRead and onSeek callbacks. 
*/
    if (pWav->onRead == drwav__on_read_stdio || pWav->onWrite == drwav__on_write_stdio) {
        fclose((FILE*)pWav->pUserData);
    }
#endif

    return result;
}


/*
Reads raw bytes from the data chunk, clamped to the bytes remaining. When
pBufferOut is NULL the bytes are skipped instead: first by seeking, then by
read-and-discard so a reliable byte count is still obtained from backends that
cannot seek. Returns the number of bytes actually consumed.
*/
DRWAV_API size_t drwav_read_raw(drwav* pWav, size_t bytesToRead, void* pBufferOut)
{
    size_t bytesRead;
    drwav_uint32 bytesPerFrame;

    if (pWav == NULL || bytesToRead == 0) {
        return 0;   /* Invalid args. */
    }

    if (bytesToRead > pWav->bytesRemaining) {
        bytesToRead = (size_t)pWav->bytesRemaining;
    }

    if (bytesToRead == 0) {
        return 0;   /* At end. */
    }

    bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav);
    if (bytesPerFrame == 0) {
        return 0;   /* Could not determine the bytes per frame. */
    }

    if (pBufferOut != NULL) {
        bytesRead = pWav->onRead(pWav->pUserData, pBufferOut, bytesToRead);
    } else {
        /* We need to seek. If we fail, we need to read-and-discard to make sure we get a good byte count. */
        bytesRead = 0;
        while (bytesRead < bytesToRead) {
            size_t bytesToSeek = (bytesToRead - bytesRead);
            if (bytesToSeek > 0x7FFFFFFF) {
                bytesToSeek = 0x7FFFFFFF;   /* onSeek takes an int, so seek in <= INT_MAX steps. */
            }

            if (pWav->onSeek(pWav->pUserData, (int)bytesToSeek, drwav_seek_origin_current) == DRWAV_FALSE) {
                break;
            }

            bytesRead += bytesToSeek;
        }

        /* When we get here we may need to read-and-discard some data. */
        while (bytesRead < bytesToRead) {
            drwav_uint8 buffer[4096];
            size_t bytesSeeked;
            size_t bytesToSeek = (bytesToRead - bytesRead);
            if (bytesToSeek > sizeof(buffer)) {
                bytesToSeek = sizeof(buffer);
            }

            bytesSeeked = pWav->onRead(pWav->pUserData, buffer, bytesToSeek);
            bytesRead += bytesSeeked;

            if (bytesSeeked < bytesToSeek) {
                break;  /* Reached the end. */
            }
        }
    }

    /* Integer division: a partial trailing frame does not advance the frame cursor. */
    pWav->readCursorInPCMFrames += bytesRead / bytesPerFrame;

    pWav->bytesRemaining -= bytesRead;
    return bytesRead;
}


DRWAV_API drwav_uint64 drwav_read_pcm_frames_le(drwav* pWav, drwav_uint64 framesToRead, void* pBufferOut)
{
    drwav_uint32 bytesPerFrame;
    drwav_uint64 bytesToRead;   /* Intentionally uint64 instead of size_t so we can do a check that we're not reading too much on 32-bit builds.
*/
    drwav_uint64 framesRemainingInFile;

    if (pWav == NULL || framesToRead == 0) {
        return 0;
    }

    /* Cannot use this function for compressed formats. */
    if (drwav__is_compressed_format_tag(pWav->translatedFormatTag)) {
        return 0;
    }

    /* Clamp the request to what is actually left in the file. */
    framesRemainingInFile = pWav->totalPCMFrameCount - pWav->readCursorInPCMFrames;
    if (framesToRead > framesRemainingInFile) {
        framesToRead = framesRemainingInFile;
    }

    bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav);
    if (bytesPerFrame == 0) {
        return 0;
    }

    /* Don't try to read more samples than can potentially fit in the output buffer. */
    bytesToRead = framesToRead * bytesPerFrame;
    if (bytesToRead > DRWAV_SIZE_MAX) {
        bytesToRead = (DRWAV_SIZE_MAX / bytesPerFrame) * bytesPerFrame; /* Round the number of bytes to read to a clean frame boundary. */
    }

    /*
    Doing an explicit check here just to make it clear that we don't want to be attempt to read anything
    if there's no bytes to read. There *could* be a time where it evaluates to 0 due to overflowing.
    */
    if (bytesToRead == 0) {
        return 0;
    }

    return drwav_read_raw(pWav, (size_t)bytesToRead, pBufferOut) / bytesPerFrame;
}

/* Reads little-endian frames via drwav_read_pcm_frames_le() and byte-swaps them in place. */
DRWAV_API drwav_uint64 drwav_read_pcm_frames_be(drwav* pWav, drwav_uint64 framesToRead, void* pBufferOut)
{
    drwav_uint64 framesRead = drwav_read_pcm_frames_le(pWav, framesToRead, pBufferOut);

    if (pBufferOut != NULL) {
        drwav_uint32 bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav);
        if (bytesPerFrame == 0) {
            return 0;   /* Could not get the bytes per frame which means bytes per sample cannot be determined and we don't know how to byte swap. */
        }

        drwav__bswap_samples(pBufferOut, framesRead*pWav->channels, bytesPerFrame/pWav->channels);
    }

    return framesRead;
}

/* Reads PCM frames in host byte order, picking the LE or BE path from the container's endianness. */
DRWAV_API drwav_uint64 drwav_read_pcm_frames(drwav* pWav, drwav_uint64 framesToRead, void* pBufferOut)
{
    drwav_uint64 framesRead = 0;

    if (drwav_is_container_be(pWav->container)) {
        /*
        Special case for AIFF. AIFF is a big-endian encoded format, but it supports a format that is
        PCM in little-endian encoding.
In this case, we fall through this branch and treate it as little-endian. */ if (pWav->container != drwav_container_aiff || pWav->aiff.isLE == DRWAV_FALSE) { if (drwav__is_little_endian()) { framesRead = drwav_read_pcm_frames_be(pWav, framesToRead, pBufferOut); } else { framesRead = drwav_read_pcm_frames_le(pWav, framesToRead, pBufferOut); } goto post_process; } } /* Getting here means the data should be considered little-endian. */ if (drwav__is_little_endian()) { framesRead = drwav_read_pcm_frames_le(pWav, framesToRead, pBufferOut); } else { framesRead = drwav_read_pcm_frames_be(pWav, framesToRead, pBufferOut); } /* Here is where we check if we need to do a signed/unsigned conversion for AIFF. The reason we need to do this is because dr_wav always assumes an 8-bit sample is unsigned, whereas AIFF can have signed 8-bit formats. */ post_process: { if (pWav->container == drwav_container_aiff && pWav->bitsPerSample == 8 && pWav->aiff.isUnsigned == DRWAV_FALSE) { if (pBufferOut != NULL) { drwav_uint64 iSample; for (iSample = 0; iSample < framesRead * pWav->channels; iSample += 1) { ((drwav_uint8*)pBufferOut)[iSample] += 128; } } } } return framesRead; } DRWAV_PRIVATE drwav_bool32 drwav_seek_to_first_pcm_frame(drwav* pWav) { if (pWav->onWrite != NULL) { return DRWAV_FALSE; /* No seeking in write mode. */ } if (!pWav->onSeek(pWav->pUserData, (int)pWav->dataChunkDataPos, drwav_seek_origin_start)) { return DRWAV_FALSE; } if (drwav__is_compressed_format_tag(pWav->translatedFormatTag)) { /* Cached data needs to be cleared for compressed formats. */ if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM) { DRWAV_ZERO_OBJECT(&pWav->msadpcm); } else if (pWav->translatedFormatTag == DR_WAVE_FORMAT_DVI_ADPCM) { DRWAV_ZERO_OBJECT(&pWav->ima); } else { DRWAV_ASSERT(DRWAV_FALSE); /* If this assertion is triggered it means I've implemented a new compressed format but forgot to add a branch for it here. 
*/ } } pWav->readCursorInPCMFrames = 0; pWav->bytesRemaining = pWav->dataChunkDataSize; return DRWAV_TRUE; } DRWAV_API drwav_bool32 drwav_seek_to_pcm_frame(drwav* pWav, drwav_uint64 targetFrameIndex) { /* Seeking should be compatible with wave files > 2GB. */ if (pWav == NULL || pWav->onSeek == NULL) { return DRWAV_FALSE; } /* No seeking in write mode. */ if (pWav->onWrite != NULL) { return DRWAV_FALSE; } /* If there are no samples, just return DRWAV_TRUE without doing anything. */ if (pWav->totalPCMFrameCount == 0) { return DRWAV_TRUE; } /* Make sure the sample is clamped. */ if (targetFrameIndex > pWav->totalPCMFrameCount) { targetFrameIndex = pWav->totalPCMFrameCount; } /* For compressed formats we just use a slow generic seek. If we are seeking forward we just seek forward. If we are going backwards we need to seek back to the start. */ if (drwav__is_compressed_format_tag(pWav->translatedFormatTag)) { /* TODO: This can be optimized. */ /* If we're seeking forward it's simple - just keep reading samples until we hit the sample we're requesting. If we're seeking backwards, we first need to seek back to the start and then just do the same thing as a forward seek. 
*/ if (targetFrameIndex < pWav->readCursorInPCMFrames) { if (!drwav_seek_to_first_pcm_frame(pWav)) { return DRWAV_FALSE; } } if (targetFrameIndex > pWav->readCursorInPCMFrames) { drwav_uint64 offsetInFrames = targetFrameIndex - pWav->readCursorInPCMFrames; drwav_int16 devnull[2048]; while (offsetInFrames > 0) { drwav_uint64 framesRead = 0; drwav_uint64 framesToRead = offsetInFrames; if (framesToRead > drwav_countof(devnull)/pWav->channels) { framesToRead = drwav_countof(devnull)/pWav->channels; } if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM) { framesRead = drwav_read_pcm_frames_s16__msadpcm(pWav, framesToRead, devnull); } else if (pWav->translatedFormatTag == DR_WAVE_FORMAT_DVI_ADPCM) { framesRead = drwav_read_pcm_frames_s16__ima(pWav, framesToRead, devnull); } else { DRWAV_ASSERT(DRWAV_FALSE); /* If this assertion is triggered it means I've implemented a new compressed format but forgot to add a branch for it here. */ } if (framesRead != framesToRead) { return DRWAV_FALSE; } offsetInFrames -= framesRead; } } } else { drwav_uint64 totalSizeInBytes; drwav_uint64 currentBytePos; drwav_uint64 targetBytePos; drwav_uint64 offset; drwav_uint32 bytesPerFrame; bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); if (bytesPerFrame == 0) { return DRWAV_FALSE; /* Not able to calculate offset. */ } totalSizeInBytes = pWav->totalPCMFrameCount * bytesPerFrame; /*DRWAV_ASSERT(totalSizeInBytes >= pWav->bytesRemaining);*/ currentBytePos = totalSizeInBytes - pWav->bytesRemaining; targetBytePos = targetFrameIndex * bytesPerFrame; if (currentBytePos < targetBytePos) { /* Offset forwards. */ offset = (targetBytePos - currentBytePos); } else { /* Offset backwards. */ if (!drwav_seek_to_first_pcm_frame(pWav)) { return DRWAV_FALSE; } offset = targetBytePos; } while (offset > 0) { int offset32 = ((offset > INT_MAX) ? 
INT_MAX : (int)offset);  /* Clamp each seek step to what fits in the callback's int parameter. */

            if (!pWav->onSeek(pWav->pUserData, offset32, drwav_seek_origin_current)) {
                return DRWAV_FALSE;
            }

            pWav->readCursorInPCMFrames += offset32 / bytesPerFrame;
            pWav->bytesRemaining -= offset32;
            offset -= offset32;
        }
    }

    return DRWAV_TRUE;
}

/* Reports the current read position in PCM frames. Returns DRWAV_INVALID_ARGS if either pointer is NULL. */
DRWAV_API drwav_result drwav_get_cursor_in_pcm_frames(drwav* pWav, drwav_uint64* pCursor)
{
    if (pCursor == NULL) {
        return DRWAV_INVALID_ARGS;
    }

    *pCursor = 0;   /* Safety. */

    if (pWav == NULL) {
        return DRWAV_INVALID_ARGS;
    }

    *pCursor = pWav->readCursorInPCMFrames;
    return DRWAV_SUCCESS;
}

/* Reports the total length of the stream in PCM frames. Returns DRWAV_INVALID_ARGS if either pointer is NULL. */
DRWAV_API drwav_result drwav_get_length_in_pcm_frames(drwav* pWav, drwav_uint64* pLength)
{
    if (pLength == NULL) {
        return DRWAV_INVALID_ARGS;
    }

    *pLength = 0;   /* Safety. */

    if (pWav == NULL) {
        return DRWAV_INVALID_ARGS;
    }

    *pLength = pWav->totalPCMFrameCount;
    return DRWAV_SUCCESS;
}


/*
Writes raw bytes through the onWrite callback and accumulates the count into
dataChunkDataSize (used later by drwav_uninit() to patch the chunk sizes).
Returns the number of bytes actually written.
*/
DRWAV_API size_t drwav_write_raw(drwav* pWav, size_t bytesToWrite, const void* pData)
{
    size_t bytesWritten;

    if (pWav == NULL || bytesToWrite == 0 || pData == NULL) {
        return 0;
    }

    bytesWritten = pWav->onWrite(pWav->pUserData, pData, bytesToWrite);
    pWav->dataChunkDataSize += bytesWritten;

    return bytesWritten;
}


DRWAV_API drwav_uint64 drwav_write_pcm_frames_le(drwav* pWav, drwav_uint64 framesToWrite, const void* pData)
{
    drwav_uint64 bytesToWrite;
    drwav_uint64 bytesWritten;
    const drwav_uint8* pRunningData;

    if (pWav == NULL || framesToWrite == 0 || pData == NULL) {
        return 0;
    }

    bytesToWrite = ((framesToWrite * pWav->channels * pWav->bitsPerSample) / 8);
    if (bytesToWrite > DRWAV_SIZE_MAX) {
        return 0;
    }

    bytesWritten = 0;
    pRunningData = (const drwav_uint8*)pData;

    while (bytesToWrite > 0) {
        size_t bytesJustWritten;
        drwav_uint64 bytesToWriteThisIteration;

        bytesToWriteThisIteration = bytesToWrite;
        DRWAV_ASSERT(bytesToWriteThisIteration <= DRWAV_SIZE_MAX);  /* <-- This is checked above.
*/ bytesJustWritten = drwav_write_raw(pWav, (size_t)bytesToWriteThisIteration, pRunningData); if (bytesJustWritten == 0) { break; } bytesToWrite -= bytesJustWritten; bytesWritten += bytesJustWritten; pRunningData += bytesJustWritten; } return (bytesWritten * 8) / pWav->bitsPerSample / pWav->channels; } DRWAV_API drwav_uint64 drwav_write_pcm_frames_be(drwav* pWav, drwav_uint64 framesToWrite, const void* pData) { drwav_uint64 bytesToWrite; drwav_uint64 bytesWritten; drwav_uint32 bytesPerSample; const drwav_uint8* pRunningData; if (pWav == NULL || framesToWrite == 0 || pData == NULL) { return 0; } bytesToWrite = ((framesToWrite * pWav->channels * pWav->bitsPerSample) / 8); if (bytesToWrite > DRWAV_SIZE_MAX) { return 0; } bytesWritten = 0; pRunningData = (const drwav_uint8*)pData; bytesPerSample = drwav_get_bytes_per_pcm_frame(pWav) / pWav->channels; if (bytesPerSample == 0) { return 0; /* Cannot determine bytes per sample, or bytes per sample is less than one byte. */ } while (bytesToWrite > 0) { drwav_uint8 temp[4096]; drwav_uint32 sampleCount; size_t bytesJustWritten; drwav_uint64 bytesToWriteThisIteration; bytesToWriteThisIteration = bytesToWrite; DRWAV_ASSERT(bytesToWriteThisIteration <= DRWAV_SIZE_MAX); /* <-- This is checked above. */ /* WAV files are always little-endian. We need to byte swap on big-endian architectures. Since our input buffer is read-only we need to use an intermediary buffer for the conversion. 
*/
        sampleCount = sizeof(temp)/bytesPerSample;

        if (bytesToWriteThisIteration > ((drwav_uint64)sampleCount)*bytesPerSample) {
            bytesToWriteThisIteration = ((drwav_uint64)sampleCount)*bytesPerSample;
        }

        DRWAV_COPY_MEMORY(temp, pRunningData, (size_t)bytesToWriteThisIteration);
        drwav__bswap_samples(temp, sampleCount, bytesPerSample);

        bytesJustWritten = drwav_write_raw(pWav, (size_t)bytesToWriteThisIteration, temp);
        if (bytesJustWritten == 0) {
            break;
        }

        bytesToWrite -= bytesJustWritten;
        bytesWritten += bytesJustWritten;
        pRunningData += bytesJustWritten;
    }

    /* Convert the byte count back into whole PCM frames. */
    return (bytesWritten * 8) / pWav->bitsPerSample / pWav->channels;
}

/* Writes PCM frames given in host byte order, dispatching to the LE or BE path. */
DRWAV_API drwav_uint64 drwav_write_pcm_frames(drwav* pWav, drwav_uint64 framesToWrite, const void* pData)
{
    if (drwav__is_little_endian()) {
        return drwav_write_pcm_frames_le(pWav, framesToWrite, pData);
    } else {
        return drwav_write_pcm_frames_be(pWav, framesToWrite, pData);
    }
}


/*
Decodes Microsoft ADPCM to signed 16-bit PCM. Each block starts with a header
(predictor/delta/previous samples, per channel) followed by packed 4-bit
nibbles; decoded samples are staged in msadpcm.cachedFrames and drained into
pBufferOut (pass NULL to discard). Returns the number of frames produced.
*/
DRWAV_PRIVATE drwav_uint64 drwav_read_pcm_frames_s16__msadpcm(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut)
{
    drwav_uint64 totalFramesRead = 0;

    DRWAV_ASSERT(pWav != NULL);
    DRWAV_ASSERT(framesToRead > 0);

    /* TODO: Lots of room for optimization here. */

    while (pWav->readCursorInPCMFrames < pWav->totalPCMFrameCount) {
        DRWAV_ASSERT(framesToRead > 0); /* This loop iteration will never get hit with framesToRead == 0 because it's asserted at the top, and we check for 0 inside the loop just below. */

        /* If there are no cached frames we need to load a new block. */
        if (pWav->msadpcm.cachedFrameCount == 0 && pWav->msadpcm.bytesRemainingInBlock == 0) {
            if (pWav->channels == 1) {
                /* Mono.
*/ drwav_uint8 header[7]; if (pWav->onRead(pWav->pUserData, header, sizeof(header)) != sizeof(header)) { return totalFramesRead; } pWav->msadpcm.bytesRemainingInBlock = pWav->fmt.blockAlign - sizeof(header); pWav->msadpcm.predictor[0] = header[0]; pWav->msadpcm.delta[0] = drwav_bytes_to_s16(header + 1); pWav->msadpcm.prevFrames[0][1] = (drwav_int32)drwav_bytes_to_s16(header + 3); pWav->msadpcm.prevFrames[0][0] = (drwav_int32)drwav_bytes_to_s16(header + 5); pWav->msadpcm.cachedFrames[2] = pWav->msadpcm.prevFrames[0][0]; pWav->msadpcm.cachedFrames[3] = pWav->msadpcm.prevFrames[0][1]; pWav->msadpcm.cachedFrameCount = 2; } else { /* Stereo. */ drwav_uint8 header[14]; if (pWav->onRead(pWav->pUserData, header, sizeof(header)) != sizeof(header)) { return totalFramesRead; } pWav->msadpcm.bytesRemainingInBlock = pWav->fmt.blockAlign - sizeof(header); pWav->msadpcm.predictor[0] = header[0]; pWav->msadpcm.predictor[1] = header[1]; pWav->msadpcm.delta[0] = drwav_bytes_to_s16(header + 2); pWav->msadpcm.delta[1] = drwav_bytes_to_s16(header + 4); pWav->msadpcm.prevFrames[0][1] = (drwav_int32)drwav_bytes_to_s16(header + 6); pWav->msadpcm.prevFrames[1][1] = (drwav_int32)drwav_bytes_to_s16(header + 8); pWav->msadpcm.prevFrames[0][0] = (drwav_int32)drwav_bytes_to_s16(header + 10); pWav->msadpcm.prevFrames[1][0] = (drwav_int32)drwav_bytes_to_s16(header + 12); pWav->msadpcm.cachedFrames[0] = pWav->msadpcm.prevFrames[0][0]; pWav->msadpcm.cachedFrames[1] = pWav->msadpcm.prevFrames[1][0]; pWav->msadpcm.cachedFrames[2] = pWav->msadpcm.prevFrames[0][1]; pWav->msadpcm.cachedFrames[3] = pWav->msadpcm.prevFrames[1][1]; pWav->msadpcm.cachedFrameCount = 2; } } /* Output anything that's cached. 
*/ while (framesToRead > 0 && pWav->msadpcm.cachedFrameCount > 0 && pWav->readCursorInPCMFrames < pWav->totalPCMFrameCount) { if (pBufferOut != NULL) { drwav_uint32 iSample = 0; for (iSample = 0; iSample < pWav->channels; iSample += 1) { pBufferOut[iSample] = (drwav_int16)pWav->msadpcm.cachedFrames[(drwav_countof(pWav->msadpcm.cachedFrames) - (pWav->msadpcm.cachedFrameCount*pWav->channels)) + iSample]; } pBufferOut += pWav->channels; } framesToRead -= 1; totalFramesRead += 1; pWav->readCursorInPCMFrames += 1; pWav->msadpcm.cachedFrameCount -= 1; } if (framesToRead == 0) { break; } /* If there's nothing left in the cache, just go ahead and load more. If there's nothing left to load in the current block we just continue to the next loop iteration which will trigger the loading of a new block. */ if (pWav->msadpcm.cachedFrameCount == 0) { if (pWav->msadpcm.bytesRemainingInBlock == 0) { continue; } else { static drwav_int32 adaptationTable[] = { 230, 230, 230, 230, 307, 409, 512, 614, 768, 614, 512, 409, 307, 230, 230, 230 }; static drwav_int32 coeff1Table[] = { 256, 512, 0, 192, 240, 460, 392 }; static drwav_int32 coeff2Table[] = { 0, -256, 0, 64, 0, -208, -232 }; drwav_uint8 nibbles; drwav_int32 nibble0; drwav_int32 nibble1; if (pWav->onRead(pWav->pUserData, &nibbles, 1) != 1) { return totalFramesRead; } pWav->msadpcm.bytesRemainingInBlock -= 1; /* TODO: Optimize away these if statements. */ nibble0 = ((nibbles & 0xF0) >> 4); if ((nibbles & 0x80)) { nibble0 |= 0xFFFFFFF0UL; } nibble1 = ((nibbles & 0x0F) >> 0); if ((nibbles & 0x08)) { nibble1 |= 0xFFFFFFF0UL; } if (pWav->channels == 1) { /* Mono. 
*/ drwav_int32 newSample0; drwav_int32 newSample1; newSample0 = ((pWav->msadpcm.prevFrames[0][1] * coeff1Table[pWav->msadpcm.predictor[0]]) + (pWav->msadpcm.prevFrames[0][0] * coeff2Table[pWav->msadpcm.predictor[0]])) >> 8; newSample0 += nibble0 * pWav->msadpcm.delta[0]; newSample0 = drwav_clamp(newSample0, -32768, 32767); pWav->msadpcm.delta[0] = (adaptationTable[((nibbles & 0xF0) >> 4)] * pWav->msadpcm.delta[0]) >> 8; if (pWav->msadpcm.delta[0] < 16) { pWav->msadpcm.delta[0] = 16; } pWav->msadpcm.prevFrames[0][0] = pWav->msadpcm.prevFrames[0][1]; pWav->msadpcm.prevFrames[0][1] = newSample0; newSample1 = ((pWav->msadpcm.prevFrames[0][1] * coeff1Table[pWav->msadpcm.predictor[0]]) + (pWav->msadpcm.prevFrames[0][0] * coeff2Table[pWav->msadpcm.predictor[0]])) >> 8; newSample1 += nibble1 * pWav->msadpcm.delta[0]; newSample1 = drwav_clamp(newSample1, -32768, 32767); pWav->msadpcm.delta[0] = (adaptationTable[((nibbles & 0x0F) >> 0)] * pWav->msadpcm.delta[0]) >> 8; if (pWav->msadpcm.delta[0] < 16) { pWav->msadpcm.delta[0] = 16; } pWav->msadpcm.prevFrames[0][0] = pWav->msadpcm.prevFrames[0][1]; pWav->msadpcm.prevFrames[0][1] = newSample1; pWav->msadpcm.cachedFrames[2] = newSample0; pWav->msadpcm.cachedFrames[3] = newSample1; pWav->msadpcm.cachedFrameCount = 2; } else { /* Stereo. */ drwav_int32 newSample0; drwav_int32 newSample1; /* Left. */ newSample0 = ((pWav->msadpcm.prevFrames[0][1] * coeff1Table[pWav->msadpcm.predictor[0]]) + (pWav->msadpcm.prevFrames[0][0] * coeff2Table[pWav->msadpcm.predictor[0]])) >> 8; newSample0 += nibble0 * pWav->msadpcm.delta[0]; newSample0 = drwav_clamp(newSample0, -32768, 32767); pWav->msadpcm.delta[0] = (adaptationTable[((nibbles & 0xF0) >> 4)] * pWav->msadpcm.delta[0]) >> 8; if (pWav->msadpcm.delta[0] < 16) { pWav->msadpcm.delta[0] = 16; } pWav->msadpcm.prevFrames[0][0] = pWav->msadpcm.prevFrames[0][1]; pWav->msadpcm.prevFrames[0][1] = newSample0; /* Right. 
*/ newSample1 = ((pWav->msadpcm.prevFrames[1][1] * coeff1Table[pWav->msadpcm.predictor[1]]) + (pWav->msadpcm.prevFrames[1][0] * coeff2Table[pWav->msadpcm.predictor[1]])) >> 8; newSample1 += nibble1 * pWav->msadpcm.delta[1]; newSample1 = drwav_clamp(newSample1, -32768, 32767); pWav->msadpcm.delta[1] = (adaptationTable[((nibbles & 0x0F) >> 0)] * pWav->msadpcm.delta[1]) >> 8; if (pWav->msadpcm.delta[1] < 16) { pWav->msadpcm.delta[1] = 16; } pWav->msadpcm.prevFrames[1][0] = pWav->msadpcm.prevFrames[1][1]; pWav->msadpcm.prevFrames[1][1] = newSample1; pWav->msadpcm.cachedFrames[2] = newSample0; pWav->msadpcm.cachedFrames[3] = newSample1; pWav->msadpcm.cachedFrameCount = 1; } } } } return totalFramesRead; } DRWAV_PRIVATE drwav_uint64 drwav_read_pcm_frames_s16__ima(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut) { drwav_uint64 totalFramesRead = 0; drwav_uint32 iChannel; static drwav_int32 indexTable[16] = { -1, -1, -1, -1, 2, 4, 6, 8, -1, -1, -1, -1, 2, 4, 6, 8 }; static drwav_int32 stepTable[89] = { 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 19, 21, 23, 25, 28, 31, 34, 37, 41, 45, 50, 55, 60, 66, 73, 80, 88, 97, 107, 118, 130, 143, 157, 173, 190, 209, 230, 253, 279, 307, 337, 371, 408, 449, 494, 544, 598, 658, 724, 796, 876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066, 2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358, 5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899, 15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767 }; DRWAV_ASSERT(pWav != NULL); DRWAV_ASSERT(framesToRead > 0); /* TODO: Lots of room for optimization here. */ while (pWav->readCursorInPCMFrames < pWav->totalPCMFrameCount) { DRWAV_ASSERT(framesToRead > 0); /* This loop iteration will never get hit with framesToRead == 0 because it's asserted at the top, and we check for 0 inside the loop just below. */ /* If there are no cached samples we need to load a new block. 
*/ if (pWav->ima.cachedFrameCount == 0 && pWav->ima.bytesRemainingInBlock == 0) { if (pWav->channels == 1) { /* Mono. */ drwav_uint8 header[4]; if (pWav->onRead(pWav->pUserData, header, sizeof(header)) != sizeof(header)) { return totalFramesRead; } pWav->ima.bytesRemainingInBlock = pWav->fmt.blockAlign - sizeof(header); if (header[2] >= drwav_countof(stepTable)) { pWav->onSeek(pWav->pUserData, pWav->ima.bytesRemainingInBlock, drwav_seek_origin_current); pWav->ima.bytesRemainingInBlock = 0; return totalFramesRead; /* Invalid data. */ } pWav->ima.predictor[0] = (drwav_int16)drwav_bytes_to_u16(header + 0); pWav->ima.stepIndex[0] = drwav_clamp(header[2], 0, (drwav_int32)drwav_countof(stepTable)-1); /* Clamp not necessary because we checked above, but adding here to silence a static analysis warning. */ pWav->ima.cachedFrames[drwav_countof(pWav->ima.cachedFrames) - 1] = pWav->ima.predictor[0]; pWav->ima.cachedFrameCount = 1; } else { /* Stereo. */ drwav_uint8 header[8]; if (pWav->onRead(pWav->pUserData, header, sizeof(header)) != sizeof(header)) { return totalFramesRead; } pWav->ima.bytesRemainingInBlock = pWav->fmt.blockAlign - sizeof(header); if (header[2] >= drwav_countof(stepTable) || header[6] >= drwav_countof(stepTable)) { pWav->onSeek(pWav->pUserData, pWav->ima.bytesRemainingInBlock, drwav_seek_origin_current); pWav->ima.bytesRemainingInBlock = 0; return totalFramesRead; /* Invalid data. */ } pWav->ima.predictor[0] = drwav_bytes_to_s16(header + 0); pWav->ima.stepIndex[0] = drwav_clamp(header[2], 0, (drwav_int32)drwav_countof(stepTable)-1); /* Clamp not necessary because we checked above, but adding here to silence a static analysis warning. */ pWav->ima.predictor[1] = drwav_bytes_to_s16(header + 4); pWav->ima.stepIndex[1] = drwav_clamp(header[6], 0, (drwav_int32)drwav_countof(stepTable)-1); /* Clamp not necessary because we checked above, but adding here to silence a static analysis warning. 
*/ pWav->ima.cachedFrames[drwav_countof(pWav->ima.cachedFrames) - 2] = pWav->ima.predictor[0]; pWav->ima.cachedFrames[drwav_countof(pWav->ima.cachedFrames) - 1] = pWav->ima.predictor[1]; pWav->ima.cachedFrameCount = 1; } } /* Output anything that's cached. */ while (framesToRead > 0 && pWav->ima.cachedFrameCount > 0 && pWav->readCursorInPCMFrames < pWav->totalPCMFrameCount) { if (pBufferOut != NULL) { drwav_uint32 iSample; for (iSample = 0; iSample < pWav->channels; iSample += 1) { pBufferOut[iSample] = (drwav_int16)pWav->ima.cachedFrames[(drwav_countof(pWav->ima.cachedFrames) - (pWav->ima.cachedFrameCount*pWav->channels)) + iSample]; } pBufferOut += pWav->channels; } framesToRead -= 1; totalFramesRead += 1; pWav->readCursorInPCMFrames += 1; pWav->ima.cachedFrameCount -= 1; } if (framesToRead == 0) { break; } /* If there's nothing left in the cache, just go ahead and load more. If there's nothing left to load in the current block we just continue to the next loop iteration which will trigger the loading of a new block. */ if (pWav->ima.cachedFrameCount == 0) { if (pWav->ima.bytesRemainingInBlock == 0) { continue; } else { /* From what I can tell with stereo streams, it looks like every 4 bytes (8 samples) is for one channel. So it goes 4 bytes for the left channel, 4 bytes for the right channel. 
*/ pWav->ima.cachedFrameCount = 8; for (iChannel = 0; iChannel < pWav->channels; ++iChannel) { drwav_uint32 iByte; drwav_uint8 nibbles[4]; if (pWav->onRead(pWav->pUserData, &nibbles, 4) != 4) { pWav->ima.cachedFrameCount = 0; return totalFramesRead; } pWav->ima.bytesRemainingInBlock -= 4; for (iByte = 0; iByte < 4; ++iByte) { drwav_uint8 nibble0 = ((nibbles[iByte] & 0x0F) >> 0); drwav_uint8 nibble1 = ((nibbles[iByte] & 0xF0) >> 4); drwav_int32 step = stepTable[pWav->ima.stepIndex[iChannel]]; drwav_int32 predictor = pWav->ima.predictor[iChannel]; drwav_int32 diff = step >> 3; if (nibble0 & 1) diff += step >> 2; if (nibble0 & 2) diff += step >> 1; if (nibble0 & 4) diff += step; if (nibble0 & 8) diff = -diff; predictor = drwav_clamp(predictor + diff, -32768, 32767); pWav->ima.predictor[iChannel] = predictor; pWav->ima.stepIndex[iChannel] = drwav_clamp(pWav->ima.stepIndex[iChannel] + indexTable[nibble0], 0, (drwav_int32)drwav_countof(stepTable)-1); pWav->ima.cachedFrames[(drwav_countof(pWav->ima.cachedFrames) - (pWav->ima.cachedFrameCount*pWav->channels)) + (iByte*2+0)*pWav->channels + iChannel] = predictor; step = stepTable[pWav->ima.stepIndex[iChannel]]; predictor = pWav->ima.predictor[iChannel]; diff = step >> 3; if (nibble1 & 1) diff += step >> 2; if (nibble1 & 2) diff += step >> 1; if (nibble1 & 4) diff += step; if (nibble1 & 8) diff = -diff; predictor = drwav_clamp(predictor + diff, -32768, 32767); pWav->ima.predictor[iChannel] = predictor; pWav->ima.stepIndex[iChannel] = drwav_clamp(pWav->ima.stepIndex[iChannel] + indexTable[nibble1], 0, (drwav_int32)drwav_countof(stepTable)-1); pWav->ima.cachedFrames[(drwav_countof(pWav->ima.cachedFrames) - (pWav->ima.cachedFrameCount*pWav->channels)) + (iByte*2+1)*pWav->channels + iChannel] = predictor; } } } } } return totalFramesRead; } #ifndef DR_WAV_NO_CONVERSION_API static unsigned short g_drwavAlawTable[256] = { 0xEA80, 0xEB80, 0xE880, 0xE980, 0xEE80, 0xEF80, 0xEC80, 0xED80, 0xE280, 0xE380, 0xE080, 0xE180, 0xE680, 
0xE780, 0xE480, 0xE580, 0xF540, 0xF5C0, 0xF440, 0xF4C0, 0xF740, 0xF7C0, 0xF640, 0xF6C0, 0xF140, 0xF1C0, 0xF040, 0xF0C0, 0xF340, 0xF3C0, 0xF240, 0xF2C0, 0xAA00, 0xAE00, 0xA200, 0xA600, 0xBA00, 0xBE00, 0xB200, 0xB600, 0x8A00, 0x8E00, 0x8200, 0x8600, 0x9A00, 0x9E00, 0x9200, 0x9600, 0xD500, 0xD700, 0xD100, 0xD300, 0xDD00, 0xDF00, 0xD900, 0xDB00, 0xC500, 0xC700, 0xC100, 0xC300, 0xCD00, 0xCF00, 0xC900, 0xCB00, 0xFEA8, 0xFEB8, 0xFE88, 0xFE98, 0xFEE8, 0xFEF8, 0xFEC8, 0xFED8, 0xFE28, 0xFE38, 0xFE08, 0xFE18, 0xFE68, 0xFE78, 0xFE48, 0xFE58, 0xFFA8, 0xFFB8, 0xFF88, 0xFF98, 0xFFE8, 0xFFF8, 0xFFC8, 0xFFD8, 0xFF28, 0xFF38, 0xFF08, 0xFF18, 0xFF68, 0xFF78, 0xFF48, 0xFF58, 0xFAA0, 0xFAE0, 0xFA20, 0xFA60, 0xFBA0, 0xFBE0, 0xFB20, 0xFB60, 0xF8A0, 0xF8E0, 0xF820, 0xF860, 0xF9A0, 0xF9E0, 0xF920, 0xF960, 0xFD50, 0xFD70, 0xFD10, 0xFD30, 0xFDD0, 0xFDF0, 0xFD90, 0xFDB0, 0xFC50, 0xFC70, 0xFC10, 0xFC30, 0xFCD0, 0xFCF0, 0xFC90, 0xFCB0, 0x1580, 0x1480, 0x1780, 0x1680, 0x1180, 0x1080, 0x1380, 0x1280, 0x1D80, 0x1C80, 0x1F80, 0x1E80, 0x1980, 0x1880, 0x1B80, 0x1A80, 0x0AC0, 0x0A40, 0x0BC0, 0x0B40, 0x08C0, 0x0840, 0x09C0, 0x0940, 0x0EC0, 0x0E40, 0x0FC0, 0x0F40, 0x0CC0, 0x0C40, 0x0DC0, 0x0D40, 0x5600, 0x5200, 0x5E00, 0x5A00, 0x4600, 0x4200, 0x4E00, 0x4A00, 0x7600, 0x7200, 0x7E00, 0x7A00, 0x6600, 0x6200, 0x6E00, 0x6A00, 0x2B00, 0x2900, 0x2F00, 0x2D00, 0x2300, 0x2100, 0x2700, 0x2500, 0x3B00, 0x3900, 0x3F00, 0x3D00, 0x3300, 0x3100, 0x3700, 0x3500, 0x0158, 0x0148, 0x0178, 0x0168, 0x0118, 0x0108, 0x0138, 0x0128, 0x01D8, 0x01C8, 0x01F8, 0x01E8, 0x0198, 0x0188, 0x01B8, 0x01A8, 0x0058, 0x0048, 0x0078, 0x0068, 0x0018, 0x0008, 0x0038, 0x0028, 0x00D8, 0x00C8, 0x00F8, 0x00E8, 0x0098, 0x0088, 0x00B8, 0x00A8, 0x0560, 0x0520, 0x05E0, 0x05A0, 0x0460, 0x0420, 0x04E0, 0x04A0, 0x0760, 0x0720, 0x07E0, 0x07A0, 0x0660, 0x0620, 0x06E0, 0x06A0, 0x02B0, 0x0290, 0x02F0, 0x02D0, 0x0230, 0x0210, 0x0270, 0x0250, 0x03B0, 0x0390, 0x03F0, 0x03D0, 0x0330, 0x0310, 0x0370, 0x0350 }; static unsigned short g_drwavMulawTable[256] = { 
0x8284, 0x8684, 0x8A84, 0x8E84, 0x9284, 0x9684, 0x9A84, 0x9E84, 0xA284, 0xA684, 0xAA84, 0xAE84, 0xB284, 0xB684, 0xBA84, 0xBE84, 0xC184, 0xC384, 0xC584, 0xC784, 0xC984, 0xCB84, 0xCD84, 0xCF84, 0xD184, 0xD384, 0xD584, 0xD784, 0xD984, 0xDB84, 0xDD84, 0xDF84, 0xE104, 0xE204, 0xE304, 0xE404, 0xE504, 0xE604, 0xE704, 0xE804, 0xE904, 0xEA04, 0xEB04, 0xEC04, 0xED04, 0xEE04, 0xEF04, 0xF004, 0xF0C4, 0xF144, 0xF1C4, 0xF244, 0xF2C4, 0xF344, 0xF3C4, 0xF444, 0xF4C4, 0xF544, 0xF5C4, 0xF644, 0xF6C4, 0xF744, 0xF7C4, 0xF844, 0xF8A4, 0xF8E4, 0xF924, 0xF964, 0xF9A4, 0xF9E4, 0xFA24, 0xFA64, 0xFAA4, 0xFAE4, 0xFB24, 0xFB64, 0xFBA4, 0xFBE4, 0xFC24, 0xFC64, 0xFC94, 0xFCB4, 0xFCD4, 0xFCF4, 0xFD14, 0xFD34, 0xFD54, 0xFD74, 0xFD94, 0xFDB4, 0xFDD4, 0xFDF4, 0xFE14, 0xFE34, 0xFE54, 0xFE74, 0xFE8C, 0xFE9C, 0xFEAC, 0xFEBC, 0xFECC, 0xFEDC, 0xFEEC, 0xFEFC, 0xFF0C, 0xFF1C, 0xFF2C, 0xFF3C, 0xFF4C, 0xFF5C, 0xFF6C, 0xFF7C, 0xFF88, 0xFF90, 0xFF98, 0xFFA0, 0xFFA8, 0xFFB0, 0xFFB8, 0xFFC0, 0xFFC8, 0xFFD0, 0xFFD8, 0xFFE0, 0xFFE8, 0xFFF0, 0xFFF8, 0x0000, 0x7D7C, 0x797C, 0x757C, 0x717C, 0x6D7C, 0x697C, 0x657C, 0x617C, 0x5D7C, 0x597C, 0x557C, 0x517C, 0x4D7C, 0x497C, 0x457C, 0x417C, 0x3E7C, 0x3C7C, 0x3A7C, 0x387C, 0x367C, 0x347C, 0x327C, 0x307C, 0x2E7C, 0x2C7C, 0x2A7C, 0x287C, 0x267C, 0x247C, 0x227C, 0x207C, 0x1EFC, 0x1DFC, 0x1CFC, 0x1BFC, 0x1AFC, 0x19FC, 0x18FC, 0x17FC, 0x16FC, 0x15FC, 0x14FC, 0x13FC, 0x12FC, 0x11FC, 0x10FC, 0x0FFC, 0x0F3C, 0x0EBC, 0x0E3C, 0x0DBC, 0x0D3C, 0x0CBC, 0x0C3C, 0x0BBC, 0x0B3C, 0x0ABC, 0x0A3C, 0x09BC, 0x093C, 0x08BC, 0x083C, 0x07BC, 0x075C, 0x071C, 0x06DC, 0x069C, 0x065C, 0x061C, 0x05DC, 0x059C, 0x055C, 0x051C, 0x04DC, 0x049C, 0x045C, 0x041C, 0x03DC, 0x039C, 0x036C, 0x034C, 0x032C, 0x030C, 0x02EC, 0x02CC, 0x02AC, 0x028C, 0x026C, 0x024C, 0x022C, 0x020C, 0x01EC, 0x01CC, 0x01AC, 0x018C, 0x0174, 0x0164, 0x0154, 0x0144, 0x0134, 0x0124, 0x0114, 0x0104, 0x00F4, 0x00E4, 0x00D4, 0x00C4, 0x00B4, 0x00A4, 0x0094, 0x0084, 0x0078, 0x0070, 0x0068, 0x0060, 0x0058, 0x0050, 0x0048, 0x0040, 0x0038, 0x0030, 
    0x0028, 0x0020, 0x0018, 0x0010, 0x0008, 0x0000
};

/* Decodes one 8-bit A-law sample to signed 16-bit PCM via the lookup table above. */
static DRWAV_INLINE drwav_int16 drwav__alaw_to_s16(drwav_uint8 sampleIn)
{
    return (short)g_drwavAlawTable[sampleIn];
}

/* Decodes one 8-bit mu-law sample to signed 16-bit PCM via the lookup table above. */
static DRWAV_INLINE drwav_int16 drwav__mulaw_to_s16(drwav_uint8 sampleIn)
{
    return (short)g_drwavMulawTable[sampleIn];
}


/*
Converts totalSampleCount integer PCM samples of width bytesPerSample to
signed 16-bit. 1-byte samples are treated as unsigned 8-bit; 2/3/4 bytes use
the dedicated converters; 5..8 bytes go through the generic loop, which packs
the bytes (little-endian) into a 64-bit value and keeps the top 16 bits; more
than 8 bytes per sample is unsupported and produces silence.
*/
DRWAV_PRIVATE void drwav__pcm_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t totalSampleCount, unsigned int bytesPerSample)
{
    size_t i;

    /* Special case for 8-bit sample data because it's treated as unsigned. */
    if (bytesPerSample == 1) {
        drwav_u8_to_s16(pOut, pIn, totalSampleCount);
        return;
    }

    /* Slightly more optimal implementation for common formats. */
    if (bytesPerSample == 2) {
        for (i = 0; i < totalSampleCount; ++i) {
            *pOut++ = ((const drwav_int16*)pIn)[i];
        }
        return;
    }
    if (bytesPerSample == 3) {
        drwav_s24_to_s16(pOut, pIn, totalSampleCount);
        return;
    }
    if (bytesPerSample == 4) {
        drwav_s32_to_s16(pOut, (const drwav_int32*)pIn, totalSampleCount);
        return;
    }


    /* Anything more than 64 bits per sample is not supported. */
    if (bytesPerSample > 8) {
        DRWAV_ZERO_MEMORY(pOut, totalSampleCount * sizeof(*pOut));
        return;
    }


    /* Generic, slow converter. */
    for (i = 0; i < totalSampleCount; ++i) {
        drwav_uint64 sample = 0;
        unsigned int shift = (8 - bytesPerSample) * 8;  /* Left-justify the sample within the 64-bit accumulator. */

        unsigned int j;
        for (j = 0; j < bytesPerSample; j += 1) {
            DRWAV_ASSERT(j < 8);
            sample |= (drwav_uint64)(pIn[j]) << shift;
            shift += 8;
        }

        pIn += j;
        *pOut++ = (drwav_int16)((drwav_int64)sample >> 48); /* Arithmetic shift keeps the sign while reducing to 16 bits. */
    }
}

/* Converts IEEE float samples (32- or 64-bit only) to signed 16-bit PCM. */
DRWAV_PRIVATE void drwav__ieee_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t totalSampleCount, unsigned int bytesPerSample)
{
    if (bytesPerSample == 4) {
        drwav_f32_to_s16(pOut, (const float*)pIn, totalSampleCount);
        return;
    } else if (bytesPerSample == 8) {
        drwav_f64_to_s16(pOut, (const double*)pIn, totalSampleCount);
        return;
    } else {
        /*
        Only supporting 32- and 64-bit float. Output silence in all other cases. Contributions welcome
        for 16-bit float.
*/ DRWAV_ZERO_MEMORY(pOut, totalSampleCount * sizeof(*pOut)); return; } } DRWAV_PRIVATE drwav_uint64 drwav_read_pcm_frames_s16__pcm(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut) { drwav_uint64 totalFramesRead; drwav_uint8 sampleData[4096] = {0}; drwav_uint32 bytesPerFrame; drwav_uint32 bytesPerSample; drwav_uint64 samplesRead; /* Fast path. */ if ((pWav->translatedFormatTag == DR_WAVE_FORMAT_PCM && pWav->bitsPerSample == 16) || pBufferOut == NULL) { return drwav_read_pcm_frames(pWav, framesToRead, pBufferOut); } bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); if (bytesPerFrame == 0) { return 0; } bytesPerSample = bytesPerFrame / pWav->channels; if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { return 0; /* Only byte-aligned formats are supported. */ } totalFramesRead = 0; while (framesToRead > 0) { drwav_uint64 framesToReadThisIteration = drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); if (framesRead == 0) { break; } DRWAV_ASSERT(framesRead <= framesToReadThisIteration); /* If this fails it means there's a bug in drwav_read_pcm_frames(). */ /* Validation to ensure we don't read too much from out intermediary buffer. This is to protect from invalid files. */ samplesRead = framesRead * pWav->channels; if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { DRWAV_ASSERT(DRWAV_FALSE); /* This should never happen with a valid file. 
*/ break; } drwav__pcm_to_s16(pBufferOut, sampleData, (size_t)samplesRead, bytesPerSample); pBufferOut += samplesRead; framesToRead -= framesRead; totalFramesRead += framesRead; } return totalFramesRead; } DRWAV_PRIVATE drwav_uint64 drwav_read_pcm_frames_s16__ieee(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut) { drwav_uint64 totalFramesRead; drwav_uint8 sampleData[4096] = {0}; drwav_uint32 bytesPerFrame; drwav_uint32 bytesPerSample; drwav_uint64 samplesRead; if (pBufferOut == NULL) { return drwav_read_pcm_frames(pWav, framesToRead, NULL); } bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); if (bytesPerFrame == 0) { return 0; } bytesPerSample = bytesPerFrame / pWav->channels; if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { return 0; /* Only byte-aligned formats are supported. */ } totalFramesRead = 0; while (framesToRead > 0) { drwav_uint64 framesToReadThisIteration = drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); if (framesRead == 0) { break; } DRWAV_ASSERT(framesRead <= framesToReadThisIteration); /* If this fails it means there's a bug in drwav_read_pcm_frames(). */ /* Validation to ensure we don't read too much from out intermediary buffer. This is to protect from invalid files. */ samplesRead = framesRead * pWav->channels; if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { DRWAV_ASSERT(DRWAV_FALSE); /* This should never happen with a valid file. */ break; } drwav__ieee_to_s16(pBufferOut, sampleData, (size_t)samplesRead, bytesPerSample); /* Safe cast. 
*/ pBufferOut += samplesRead; framesToRead -= framesRead; totalFramesRead += framesRead; } return totalFramesRead; } DRWAV_PRIVATE drwav_uint64 drwav_read_pcm_frames_s16__alaw(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut) { drwav_uint64 totalFramesRead; drwav_uint8 sampleData[4096] = {0}; drwav_uint32 bytesPerFrame; drwav_uint32 bytesPerSample; drwav_uint64 samplesRead; if (pBufferOut == NULL) { return drwav_read_pcm_frames(pWav, framesToRead, NULL); } bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); if (bytesPerFrame == 0) { return 0; } bytesPerSample = bytesPerFrame / pWav->channels; if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { return 0; /* Only byte-aligned formats are supported. */ } totalFramesRead = 0; while (framesToRead > 0) { drwav_uint64 framesToReadThisIteration = drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); if (framesRead == 0) { break; } DRWAV_ASSERT(framesRead <= framesToReadThisIteration); /* If this fails it means there's a bug in drwav_read_pcm_frames(). */ /* Validation to ensure we don't read too much from out intermediary buffer. This is to protect from invalid files. */ samplesRead = framesRead * pWav->channels; if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { DRWAV_ASSERT(DRWAV_FALSE); /* This should never happen with a valid file. */ break; } drwav_alaw_to_s16(pBufferOut, sampleData, (size_t)samplesRead); /* For some reason libsndfile seems to be returning samples of the opposite sign for a-law, but only with AIFF files. For WAV files it seems to be the same as dr_wav. This is resulting in dr_wav's automated tests failing. I'm not sure which is correct, but will assume dr_wav. If we're enforcing libsndfile compatibility we'll swap the signs here. 
*/ #ifdef DR_WAV_LIBSNDFILE_COMPAT { if (pWav->container == drwav_container_aiff) { drwav_uint64 iSample; for (iSample = 0; iSample < samplesRead; iSample += 1) { pBufferOut[iSample] = -pBufferOut[iSample]; } } } #endif pBufferOut += samplesRead; framesToRead -= framesRead; totalFramesRead += framesRead; } return totalFramesRead; } DRWAV_PRIVATE drwav_uint64 drwav_read_pcm_frames_s16__mulaw(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut) { drwav_uint64 totalFramesRead; drwav_uint8 sampleData[4096] = {0}; drwav_uint32 bytesPerFrame; drwav_uint32 bytesPerSample; drwav_uint64 samplesRead; if (pBufferOut == NULL) { return drwav_read_pcm_frames(pWav, framesToRead, NULL); } bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); if (bytesPerFrame == 0) { return 0; } bytesPerSample = bytesPerFrame / pWav->channels; if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { return 0; /* Only byte-aligned formats are supported. */ } totalFramesRead = 0; while (framesToRead > 0) { drwav_uint64 framesToReadThisIteration = drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); if (framesRead == 0) { break; } DRWAV_ASSERT(framesRead <= framesToReadThisIteration); /* If this fails it means there's a bug in drwav_read_pcm_frames(). */ /* Validation to ensure we don't read too much from out intermediary buffer. This is to protect from invalid files. */ samplesRead = framesRead * pWav->channels; if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { DRWAV_ASSERT(DRWAV_FALSE); /* This should never happen with a valid file. */ break; } drwav_mulaw_to_s16(pBufferOut, sampleData, (size_t)samplesRead); /* Just like with alaw, for some reason the signs between libsndfile and dr_wav are opposite. We just need to swap the sign if we're compiling with libsndfile compatiblity so our automated tests don't fail. 
*/ #ifdef DR_WAV_LIBSNDFILE_COMPAT { if (pWav->container == drwav_container_aiff) { drwav_uint64 iSample; for (iSample = 0; iSample < samplesRead; iSample += 1) { pBufferOut[iSample] = -pBufferOut[iSample]; } } } #endif pBufferOut += samplesRead; framesToRead -= framesRead; totalFramesRead += framesRead; } return totalFramesRead; } DRWAV_API drwav_uint64 drwav_read_pcm_frames_s16(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut) { if (pWav == NULL || framesToRead == 0) { return 0; } if (pBufferOut == NULL) { return drwav_read_pcm_frames(pWav, framesToRead, NULL); } /* Don't try to read more samples than can potentially fit in the output buffer. */ if (framesToRead * pWav->channels * sizeof(drwav_int16) > DRWAV_SIZE_MAX) { framesToRead = DRWAV_SIZE_MAX / sizeof(drwav_int16) / pWav->channels; } if (pWav->translatedFormatTag == DR_WAVE_FORMAT_PCM) { return drwav_read_pcm_frames_s16__pcm(pWav, framesToRead, pBufferOut); } if (pWav->translatedFormatTag == DR_WAVE_FORMAT_IEEE_FLOAT) { return drwav_read_pcm_frames_s16__ieee(pWav, framesToRead, pBufferOut); } if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ALAW) { return drwav_read_pcm_frames_s16__alaw(pWav, framesToRead, pBufferOut); } if (pWav->translatedFormatTag == DR_WAVE_FORMAT_MULAW) { return drwav_read_pcm_frames_s16__mulaw(pWav, framesToRead, pBufferOut); } if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM) { return drwav_read_pcm_frames_s16__msadpcm(pWav, framesToRead, pBufferOut); } if (pWav->translatedFormatTag == DR_WAVE_FORMAT_DVI_ADPCM) { return drwav_read_pcm_frames_s16__ima(pWav, framesToRead, pBufferOut); } return 0; } DRWAV_API drwav_uint64 drwav_read_pcm_frames_s16le(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut) { drwav_uint64 framesRead = drwav_read_pcm_frames_s16(pWav, framesToRead, pBufferOut); if (pBufferOut != NULL && drwav__is_little_endian() == DRWAV_FALSE) { drwav__bswap_samples_s16(pBufferOut, framesRead*pWav->channels); } return framesRead; } DRWAV_API 
drwav_uint64 drwav_read_pcm_frames_s16be(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut) { drwav_uint64 framesRead = drwav_read_pcm_frames_s16(pWav, framesToRead, pBufferOut); if (pBufferOut != NULL && drwav__is_little_endian() == DRWAV_TRUE) { drwav__bswap_samples_s16(pBufferOut, framesRead*pWav->channels); } return framesRead; } DRWAV_API void drwav_u8_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t sampleCount) { int r; size_t i; for (i = 0; i < sampleCount; ++i) { int x = pIn[i]; r = x << 8; r = r - 32768; pOut[i] = (short)r; } } DRWAV_API void drwav_s24_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t sampleCount) { int r; size_t i; for (i = 0; i < sampleCount; ++i) { int x = ((int)(((unsigned int)(((const drwav_uint8*)pIn)[i*3+0]) << 8) | ((unsigned int)(((const drwav_uint8*)pIn)[i*3+1]) << 16) | ((unsigned int)(((const drwav_uint8*)pIn)[i*3+2])) << 24)) >> 8; r = x >> 8; pOut[i] = (short)r; } } DRWAV_API void drwav_s32_to_s16(drwav_int16* pOut, const drwav_int32* pIn, size_t sampleCount) { int r; size_t i; for (i = 0; i < sampleCount; ++i) { int x = pIn[i]; r = x >> 16; pOut[i] = (short)r; } } DRWAV_API void drwav_f32_to_s16(drwav_int16* pOut, const float* pIn, size_t sampleCount) { int r; size_t i; for (i = 0; i < sampleCount; ++i) { float x = pIn[i]; float c; c = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); c = c + 1; r = (int)(c * 32767.5f); r = r - 32768; pOut[i] = (short)r; } } DRWAV_API void drwav_f64_to_s16(drwav_int16* pOut, const double* pIn, size_t sampleCount) { int r; size_t i; for (i = 0; i < sampleCount; ++i) { double x = pIn[i]; double c; c = ((x < -1) ? -1 : ((x > 1) ? 
1 : x)); c = c + 1; r = (int)(c * 32767.5); r = r - 32768; pOut[i] = (short)r; } } DRWAV_API void drwav_alaw_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t sampleCount) { size_t i; for (i = 0; i < sampleCount; ++i) { pOut[i] = drwav__alaw_to_s16(pIn[i]); } } DRWAV_API void drwav_mulaw_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t sampleCount) { size_t i; for (i = 0; i < sampleCount; ++i) { pOut[i] = drwav__mulaw_to_s16(pIn[i]); } } DRWAV_PRIVATE void drwav__pcm_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount, unsigned int bytesPerSample) { unsigned int i; /* Special case for 8-bit sample data because it's treated as unsigned. */ if (bytesPerSample == 1) { drwav_u8_to_f32(pOut, pIn, sampleCount); return; } /* Slightly more optimal implementation for common formats. */ if (bytesPerSample == 2) { drwav_s16_to_f32(pOut, (const drwav_int16*)pIn, sampleCount); return; } if (bytesPerSample == 3) { drwav_s24_to_f32(pOut, pIn, sampleCount); return; } if (bytesPerSample == 4) { drwav_s32_to_f32(pOut, (const drwav_int32*)pIn, sampleCount); return; } /* Anything more than 64 bits per sample is not supported. */ if (bytesPerSample > 8) { DRWAV_ZERO_MEMORY(pOut, sampleCount * sizeof(*pOut)); return; } /* Generic, slow converter. */ for (i = 0; i < sampleCount; ++i) { drwav_uint64 sample = 0; unsigned int shift = (8 - bytesPerSample) * 8; unsigned int j; for (j = 0; j < bytesPerSample; j += 1) { DRWAV_ASSERT(j < 8); sample |= (drwav_uint64)(pIn[j]) << shift; shift += 8; } pIn += j; *pOut++ = (float)((drwav_int64)sample / 9223372036854775807.0); } } DRWAV_PRIVATE void drwav__ieee_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount, unsigned int bytesPerSample) { if (bytesPerSample == 4) { unsigned int i; for (i = 0; i < sampleCount; ++i) { *pOut++ = ((const float*)pIn)[i]; } return; } else if (bytesPerSample == 8) { drwav_f64_to_f32(pOut, (const double*)pIn, sampleCount); return; } else { /* Only supporting 32- and 64-bit float. 
Output silence in all other cases. Contributions welcome for 16-bit float. */ DRWAV_ZERO_MEMORY(pOut, sampleCount * sizeof(*pOut)); return; } } DRWAV_PRIVATE drwav_uint64 drwav_read_pcm_frames_f32__pcm(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut) { drwav_uint64 totalFramesRead; drwav_uint8 sampleData[4096] = {0}; drwav_uint32 bytesPerFrame; drwav_uint32 bytesPerSample; drwav_uint64 samplesRead; bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); if (bytesPerFrame == 0) { return 0; } bytesPerSample = bytesPerFrame / pWav->channels; if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { return 0; /* Only byte-aligned formats are supported. */ } totalFramesRead = 0; while (framesToRead > 0) { drwav_uint64 framesToReadThisIteration = drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); if (framesRead == 0) { break; } DRWAV_ASSERT(framesRead <= framesToReadThisIteration); /* If this fails it means there's a bug in drwav_read_pcm_frames(). */ /* Validation to ensure we don't read too much from out intermediary buffer. This is to protect from invalid files. */ samplesRead = framesRead * pWav->channels; if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { DRWAV_ASSERT(DRWAV_FALSE); /* This should never happen with a valid file. */ break; } drwav__pcm_to_f32(pBufferOut, sampleData, (size_t)samplesRead, bytesPerSample); pBufferOut += samplesRead; framesToRead -= framesRead; totalFramesRead += framesRead; } return totalFramesRead; } DRWAV_PRIVATE drwav_uint64 drwav_read_pcm_frames_f32__msadpcm_ima(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut) { /* We're just going to borrow the implementation from the drwav_read_s16() since ADPCM is a little bit more complicated than other formats and I don't want to duplicate that code. 
*/ drwav_uint64 totalFramesRead; drwav_int16 samples16[2048]; totalFramesRead = 0; while (framesToRead > 0) { drwav_uint64 framesToReadThisIteration = drwav_min(framesToRead, drwav_countof(samples16)/pWav->channels); drwav_uint64 framesRead = drwav_read_pcm_frames_s16(pWav, framesToReadThisIteration, samples16); if (framesRead == 0) { break; } DRWAV_ASSERT(framesRead <= framesToReadThisIteration); /* If this fails it means there's a bug in drwav_read_pcm_frames(). */ drwav_s16_to_f32(pBufferOut, samples16, (size_t)(framesRead*pWav->channels)); /* <-- Safe cast because we're clamping to 2048. */ pBufferOut += framesRead*pWav->channels; framesToRead -= framesRead; totalFramesRead += framesRead; } return totalFramesRead; } DRWAV_PRIVATE drwav_uint64 drwav_read_pcm_frames_f32__ieee(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut) { drwav_uint64 totalFramesRead; drwav_uint8 sampleData[4096] = {0}; drwav_uint32 bytesPerFrame; drwav_uint32 bytesPerSample; drwav_uint64 samplesRead; /* Fast path. */ if (pWav->translatedFormatTag == DR_WAVE_FORMAT_IEEE_FLOAT && pWav->bitsPerSample == 32) { return drwav_read_pcm_frames(pWav, framesToRead, pBufferOut); } bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); if (bytesPerFrame == 0) { return 0; } bytesPerSample = bytesPerFrame / pWav->channels; if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { return 0; /* Only byte-aligned formats are supported. */ } totalFramesRead = 0; while (framesToRead > 0) { drwav_uint64 framesToReadThisIteration = drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); if (framesRead == 0) { break; } DRWAV_ASSERT(framesRead <= framesToReadThisIteration); /* If this fails it means there's a bug in drwav_read_pcm_frames(). */ /* Validation to ensure we don't read too much from out intermediary buffer. This is to protect from invalid files. 
*/ samplesRead = framesRead * pWav->channels; if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { DRWAV_ASSERT(DRWAV_FALSE); /* This should never happen with a valid file. */ break; } drwav__ieee_to_f32(pBufferOut, sampleData, (size_t)samplesRead, bytesPerSample); pBufferOut += samplesRead; framesToRead -= framesRead; totalFramesRead += framesRead; } return totalFramesRead; } DRWAV_PRIVATE drwav_uint64 drwav_read_pcm_frames_f32__alaw(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut) { drwav_uint64 totalFramesRead; drwav_uint8 sampleData[4096] = {0}; drwav_uint32 bytesPerFrame; drwav_uint32 bytesPerSample; drwav_uint64 samplesRead; bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); if (bytesPerFrame == 0) { return 0; } bytesPerSample = bytesPerFrame / pWav->channels; if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { return 0; /* Only byte-aligned formats are supported. */ } totalFramesRead = 0; while (framesToRead > 0) { drwav_uint64 framesToReadThisIteration = drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); if (framesRead == 0) { break; } DRWAV_ASSERT(framesRead <= framesToReadThisIteration); /* If this fails it means there's a bug in drwav_read_pcm_frames(). */ /* Validation to ensure we don't read too much from out intermediary buffer. This is to protect from invalid files. */ samplesRead = framesRead * pWav->channels; if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { DRWAV_ASSERT(DRWAV_FALSE); /* This should never happen with a valid file. 
*/ break; } drwav_alaw_to_f32(pBufferOut, sampleData, (size_t)samplesRead); #ifdef DR_WAV_LIBSNDFILE_COMPAT { if (pWav->container == drwav_container_aiff) { drwav_uint64 iSample; for (iSample = 0; iSample < samplesRead; iSample += 1) { pBufferOut[iSample] = -pBufferOut[iSample]; } } } #endif pBufferOut += samplesRead; framesToRead -= framesRead; totalFramesRead += framesRead; } return totalFramesRead; } DRWAV_PRIVATE drwav_uint64 drwav_read_pcm_frames_f32__mulaw(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut) { drwav_uint64 totalFramesRead; drwav_uint8 sampleData[4096] = {0}; drwav_uint32 bytesPerFrame; drwav_uint32 bytesPerSample; drwav_uint64 samplesRead; bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); if (bytesPerFrame == 0) { return 0; } bytesPerSample = bytesPerFrame / pWav->channels; if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { return 0; /* Only byte-aligned formats are supported. */ } totalFramesRead = 0; while (framesToRead > 0) { drwav_uint64 framesToReadThisIteration = drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); if (framesRead == 0) { break; } DRWAV_ASSERT(framesRead <= framesToReadThisIteration); /* If this fails it means there's a bug in drwav_read_pcm_frames(). */ /* Validation to ensure we don't read too much from out intermediary buffer. This is to protect from invalid files. */ samplesRead = framesRead * pWav->channels; if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { DRWAV_ASSERT(DRWAV_FALSE); /* This should never happen with a valid file. 
*/ break; } drwav_mulaw_to_f32(pBufferOut, sampleData, (size_t)samplesRead); #ifdef DR_WAV_LIBSNDFILE_COMPAT { if (pWav->container == drwav_container_aiff) { drwav_uint64 iSample; for (iSample = 0; iSample < samplesRead; iSample += 1) { pBufferOut[iSample] = -pBufferOut[iSample]; } } } #endif pBufferOut += samplesRead; framesToRead -= framesRead; totalFramesRead += framesRead; } return totalFramesRead; } DRWAV_API drwav_uint64 drwav_read_pcm_frames_f32(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut) { if (pWav == NULL || framesToRead == 0) { return 0; } if (pBufferOut == NULL) { return drwav_read_pcm_frames(pWav, framesToRead, NULL); } /* Don't try to read more samples than can potentially fit in the output buffer. */ if (framesToRead * pWav->channels * sizeof(float) > DRWAV_SIZE_MAX) { framesToRead = DRWAV_SIZE_MAX / sizeof(float) / pWav->channels; } if (pWav->translatedFormatTag == DR_WAVE_FORMAT_PCM) { return drwav_read_pcm_frames_f32__pcm(pWav, framesToRead, pBufferOut); } if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM || pWav->translatedFormatTag == DR_WAVE_FORMAT_DVI_ADPCM) { return drwav_read_pcm_frames_f32__msadpcm_ima(pWav, framesToRead, pBufferOut); } if (pWav->translatedFormatTag == DR_WAVE_FORMAT_IEEE_FLOAT) { return drwav_read_pcm_frames_f32__ieee(pWav, framesToRead, pBufferOut); } if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ALAW) { return drwav_read_pcm_frames_f32__alaw(pWav, framesToRead, pBufferOut); } if (pWav->translatedFormatTag == DR_WAVE_FORMAT_MULAW) { return drwav_read_pcm_frames_f32__mulaw(pWav, framesToRead, pBufferOut); } return 0; } DRWAV_API drwav_uint64 drwav_read_pcm_frames_f32le(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut) { drwav_uint64 framesRead = drwav_read_pcm_frames_f32(pWav, framesToRead, pBufferOut); if (pBufferOut != NULL && drwav__is_little_endian() == DRWAV_FALSE) { drwav__bswap_samples_f32(pBufferOut, framesRead*pWav->channels); } return framesRead; } DRWAV_API drwav_uint64 
drwav_read_pcm_frames_f32be(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut) { drwav_uint64 framesRead = drwav_read_pcm_frames_f32(pWav, framesToRead, pBufferOut); if (pBufferOut != NULL && drwav__is_little_endian() == DRWAV_TRUE) { drwav__bswap_samples_f32(pBufferOut, framesRead*pWav->channels); } return framesRead; } DRWAV_API void drwav_u8_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount) { size_t i; if (pOut == NULL || pIn == NULL) { return; } #ifdef DR_WAV_LIBSNDFILE_COMPAT /* It appears libsndfile uses slightly different logic for the u8 -> f32 conversion to dr_wav, which in my opinion is incorrect. It appears libsndfile performs the conversion something like "f32 = (u8 / 256) * 2 - 1", however I think it should be "f32 = (u8 / 255) * 2 - 1" (note the divisor of 256 vs 255). I use libsndfile as a benchmark for testing, so I'm therefore leaving this block here just for my automated correctness testing. This is disabled by default. */ for (i = 0; i < sampleCount; ++i) { *pOut++ = (pIn[i] / 256.0f) * 2 - 1; } #else for (i = 0; i < sampleCount; ++i) { float x = pIn[i]; x = x * 0.00784313725490196078f; /* 0..255 to 0..2 */ x = x - 1; /* 0..2 to -1..1 */ *pOut++ = x; } #endif } DRWAV_API void drwav_s16_to_f32(float* pOut, const drwav_int16* pIn, size_t sampleCount) { size_t i; if (pOut == NULL || pIn == NULL) { return; } for (i = 0; i < sampleCount; ++i) { *pOut++ = pIn[i] * 0.000030517578125f; } } DRWAV_API void drwav_s24_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount) { size_t i; if (pOut == NULL || pIn == NULL) { return; } for (i = 0; i < sampleCount; ++i) { double x; drwav_uint32 a = ((drwav_uint32)(pIn[i*3+0]) << 8); drwav_uint32 b = ((drwav_uint32)(pIn[i*3+1]) << 16); drwav_uint32 c = ((drwav_uint32)(pIn[i*3+2]) << 24); x = (double)((drwav_int32)(a | b | c) >> 8); *pOut++ = (float)(x * 0.00000011920928955078125); } } DRWAV_API void drwav_s32_to_f32(float* pOut, const drwav_int32* pIn, size_t sampleCount) { size_t i; if 
(pOut == NULL || pIn == NULL) { return; } for (i = 0; i < sampleCount; ++i) { *pOut++ = (float)(pIn[i] / 2147483648.0); } } DRWAV_API void drwav_f64_to_f32(float* pOut, const double* pIn, size_t sampleCount) { size_t i; if (pOut == NULL || pIn == NULL) { return; } for (i = 0; i < sampleCount; ++i) { *pOut++ = (float)pIn[i]; } } DRWAV_API void drwav_alaw_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount) { size_t i; if (pOut == NULL || pIn == NULL) { return; } for (i = 0; i < sampleCount; ++i) { *pOut++ = drwav__alaw_to_s16(pIn[i]) / 32768.0f; } } DRWAV_API void drwav_mulaw_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount) { size_t i; if (pOut == NULL || pIn == NULL) { return; } for (i = 0; i < sampleCount; ++i) { *pOut++ = drwav__mulaw_to_s16(pIn[i]) / 32768.0f; } } DRWAV_PRIVATE void drwav__pcm_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t totalSampleCount, unsigned int bytesPerSample) { unsigned int i; /* Special case for 8-bit sample data because it's treated as unsigned. */ if (bytesPerSample == 1) { drwav_u8_to_s32(pOut, pIn, totalSampleCount); return; } /* Slightly more optimal implementation for common formats. */ if (bytesPerSample == 2) { drwav_s16_to_s32(pOut, (const drwav_int16*)pIn, totalSampleCount); return; } if (bytesPerSample == 3) { drwav_s24_to_s32(pOut, pIn, totalSampleCount); return; } if (bytesPerSample == 4) { for (i = 0; i < totalSampleCount; ++i) { *pOut++ = ((const drwav_int32*)pIn)[i]; } return; } /* Anything more than 64 bits per sample is not supported. */ if (bytesPerSample > 8) { DRWAV_ZERO_MEMORY(pOut, totalSampleCount * sizeof(*pOut)); return; } /* Generic, slow converter. 
*/ for (i = 0; i < totalSampleCount; ++i) { drwav_uint64 sample = 0; unsigned int shift = (8 - bytesPerSample) * 8; unsigned int j; for (j = 0; j < bytesPerSample; j += 1) { DRWAV_ASSERT(j < 8); sample |= (drwav_uint64)(pIn[j]) << shift; shift += 8; } pIn += j; *pOut++ = (drwav_int32)((drwav_int64)sample >> 32); } } DRWAV_PRIVATE void drwav__ieee_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t totalSampleCount, unsigned int bytesPerSample) { if (bytesPerSample == 4) { drwav_f32_to_s32(pOut, (const float*)pIn, totalSampleCount); return; } else if (bytesPerSample == 8) { drwav_f64_to_s32(pOut, (const double*)pIn, totalSampleCount); return; } else { /* Only supporting 32- and 64-bit float. Output silence in all other cases. Contributions welcome for 16-bit float. */ DRWAV_ZERO_MEMORY(pOut, totalSampleCount * sizeof(*pOut)); return; } } DRWAV_PRIVATE drwav_uint64 drwav_read_pcm_frames_s32__pcm(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut) { drwav_uint64 totalFramesRead; drwav_uint8 sampleData[4096] = {0}; drwav_uint32 bytesPerFrame; drwav_uint32 bytesPerSample; drwav_uint64 samplesRead; /* Fast path. */ if (pWav->translatedFormatTag == DR_WAVE_FORMAT_PCM && pWav->bitsPerSample == 32) { return drwav_read_pcm_frames(pWav, framesToRead, pBufferOut); } bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); if (bytesPerFrame == 0) { return 0; } bytesPerSample = bytesPerFrame / pWav->channels; if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { return 0; /* Only byte-aligned formats are supported. */ } totalFramesRead = 0; while (framesToRead > 0) { drwav_uint64 framesToReadThisIteration = drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); if (framesRead == 0) { break; } DRWAV_ASSERT(framesRead <= framesToReadThisIteration); /* If this fails it means there's a bug in drwav_read_pcm_frames(). 
*/ /* Validation to ensure we don't read too much from out intermediary buffer. This is to protect from invalid files. */ samplesRead = framesRead * pWav->channels; if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { DRWAV_ASSERT(DRWAV_FALSE); /* This should never happen with a valid file. */ break; } drwav__pcm_to_s32(pBufferOut, sampleData, (size_t)samplesRead, bytesPerSample); pBufferOut += samplesRead; framesToRead -= framesRead; totalFramesRead += framesRead; } return totalFramesRead; } DRWAV_PRIVATE drwav_uint64 drwav_read_pcm_frames_s32__msadpcm_ima(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut) { /* We're just going to borrow the implementation from the drwav_read_s16() since ADPCM is a little bit more complicated than other formats and I don't want to duplicate that code. */ drwav_uint64 totalFramesRead = 0; drwav_int16 samples16[2048]; while (framesToRead > 0) { drwav_uint64 framesToReadThisIteration = drwav_min(framesToRead, drwav_countof(samples16)/pWav->channels); drwav_uint64 framesRead = drwav_read_pcm_frames_s16(pWav, framesToReadThisIteration, samples16); if (framesRead == 0) { break; } DRWAV_ASSERT(framesRead <= framesToReadThisIteration); /* If this fails it means there's a bug in drwav_read_pcm_frames(). */ drwav_s16_to_s32(pBufferOut, samples16, (size_t)(framesRead*pWav->channels)); /* <-- Safe cast because we're clamping to 2048. 
*/ pBufferOut += framesRead*pWav->channels; framesToRead -= framesRead; totalFramesRead += framesRead; } return totalFramesRead; } DRWAV_PRIVATE drwav_uint64 drwav_read_pcm_frames_s32__ieee(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut) { drwav_uint64 totalFramesRead; drwav_uint8 sampleData[4096] = {0}; drwav_uint32 bytesPerFrame; drwav_uint32 bytesPerSample; drwav_uint64 samplesRead; bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); if (bytesPerFrame == 0) { return 0; } bytesPerSample = bytesPerFrame / pWav->channels; if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { return 0; /* Only byte-aligned formats are supported. */ } totalFramesRead = 0; while (framesToRead > 0) { drwav_uint64 framesToReadThisIteration = drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); if (framesRead == 0) { break; } DRWAV_ASSERT(framesRead <= framesToReadThisIteration); /* If this fails it means there's a bug in drwav_read_pcm_frames(). */ /* Validation to ensure we don't read too much from out intermediary buffer. This is to protect from invalid files. */ samplesRead = framesRead * pWav->channels; if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { DRWAV_ASSERT(DRWAV_FALSE); /* This should never happen with a valid file. 
*/ break; } drwav__ieee_to_s32(pBufferOut, sampleData, (size_t)samplesRead, bytesPerSample); pBufferOut += samplesRead; framesToRead -= framesRead; totalFramesRead += framesRead; } return totalFramesRead; } DRWAV_PRIVATE drwav_uint64 drwav_read_pcm_frames_s32__alaw(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut) { drwav_uint64 totalFramesRead; drwav_uint8 sampleData[4096] = {0}; drwav_uint32 bytesPerFrame; drwav_uint32 bytesPerSample; drwav_uint64 samplesRead; bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); if (bytesPerFrame == 0) { return 0; } bytesPerSample = bytesPerFrame / pWav->channels; if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { return 0; /* Only byte-aligned formats are supported. */ } totalFramesRead = 0; while (framesToRead > 0) { drwav_uint64 framesToReadThisIteration = drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); if (framesRead == 0) { break; } DRWAV_ASSERT(framesRead <= framesToReadThisIteration); /* If this fails it means there's a bug in drwav_read_pcm_frames(). */ /* Validation to ensure we don't read too much from out intermediary buffer. This is to protect from invalid files. */ samplesRead = framesRead * pWav->channels; if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { DRWAV_ASSERT(DRWAV_FALSE); /* This should never happen with a valid file. 
*/ break; } drwav_alaw_to_s32(pBufferOut, sampleData, (size_t)samplesRead); #ifdef DR_WAV_LIBSNDFILE_COMPAT { if (pWav->container == drwav_container_aiff) { drwav_uint64 iSample; for (iSample = 0; iSample < samplesRead; iSample += 1) { pBufferOut[iSample] = -pBufferOut[iSample]; } } } #endif pBufferOut += samplesRead; framesToRead -= framesRead; totalFramesRead += framesRead; } return totalFramesRead; } DRWAV_PRIVATE drwav_uint64 drwav_read_pcm_frames_s32__mulaw(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut) { drwav_uint64 totalFramesRead; drwav_uint8 sampleData[4096] = {0}; drwav_uint32 bytesPerFrame; drwav_uint32 bytesPerSample; drwav_uint64 samplesRead; bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav); if (bytesPerFrame == 0) { return 0; } bytesPerSample = bytesPerFrame / pWav->channels; if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { return 0; /* Only byte-aligned formats are supported. */ } totalFramesRead = 0; while (framesToRead > 0) { drwav_uint64 framesToReadThisIteration = drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); if (framesRead == 0) { break; } DRWAV_ASSERT(framesRead <= framesToReadThisIteration); /* If this fails it means there's a bug in drwav_read_pcm_frames(). */ /* Validation to ensure we don't read too much from out intermediary buffer. This is to protect from invalid files. */ samplesRead = framesRead * pWav->channels; if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { DRWAV_ASSERT(DRWAV_FALSE); /* This should never happen with a valid file. 
*/ break; } drwav_mulaw_to_s32(pBufferOut, sampleData, (size_t)samplesRead); #ifdef DR_WAV_LIBSNDFILE_COMPAT { if (pWav->container == drwav_container_aiff) { drwav_uint64 iSample; for (iSample = 0; iSample < samplesRead; iSample += 1) { pBufferOut[iSample] = -pBufferOut[iSample]; } } } #endif pBufferOut += samplesRead; framesToRead -= framesRead; totalFramesRead += framesRead; } return totalFramesRead; } DRWAV_API drwav_uint64 drwav_read_pcm_frames_s32(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut) { if (pWav == NULL || framesToRead == 0) { return 0; } if (pBufferOut == NULL) { return drwav_read_pcm_frames(pWav, framesToRead, NULL); } /* Don't try to read more samples than can potentially fit in the output buffer. */ if (framesToRead * pWav->channels * sizeof(drwav_int32) > DRWAV_SIZE_MAX) { framesToRead = DRWAV_SIZE_MAX / sizeof(drwav_int32) / pWav->channels; } if (pWav->translatedFormatTag == DR_WAVE_FORMAT_PCM) { return drwav_read_pcm_frames_s32__pcm(pWav, framesToRead, pBufferOut); } if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM || pWav->translatedFormatTag == DR_WAVE_FORMAT_DVI_ADPCM) { return drwav_read_pcm_frames_s32__msadpcm_ima(pWav, framesToRead, pBufferOut); } if (pWav->translatedFormatTag == DR_WAVE_FORMAT_IEEE_FLOAT) { return drwav_read_pcm_frames_s32__ieee(pWav, framesToRead, pBufferOut); } if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ALAW) { return drwav_read_pcm_frames_s32__alaw(pWav, framesToRead, pBufferOut); } if (pWav->translatedFormatTag == DR_WAVE_FORMAT_MULAW) { return drwav_read_pcm_frames_s32__mulaw(pWav, framesToRead, pBufferOut); } return 0; } DRWAV_API drwav_uint64 drwav_read_pcm_frames_s32le(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut) { drwav_uint64 framesRead = drwav_read_pcm_frames_s32(pWav, framesToRead, pBufferOut); if (pBufferOut != NULL && drwav__is_little_endian() == DRWAV_FALSE) { drwav__bswap_samples_s32(pBufferOut, framesRead*pWav->channels); } return framesRead; } DRWAV_API 
drwav_uint64 drwav_read_pcm_frames_s32be(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut) { drwav_uint64 framesRead = drwav_read_pcm_frames_s32(pWav, framesToRead, pBufferOut); if (pBufferOut != NULL && drwav__is_little_endian() == DRWAV_TRUE) { drwav__bswap_samples_s32(pBufferOut, framesRead*pWav->channels); } return framesRead; } DRWAV_API void drwav_u8_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t sampleCount) { size_t i; if (pOut == NULL || pIn == NULL) { return; } for (i = 0; i < sampleCount; ++i) { *pOut++ = ((int)pIn[i] - 128) << 24; } } DRWAV_API void drwav_s16_to_s32(drwav_int32* pOut, const drwav_int16* pIn, size_t sampleCount) { size_t i; if (pOut == NULL || pIn == NULL) { return; } for (i = 0; i < sampleCount; ++i) { *pOut++ = pIn[i] << 16; } } DRWAV_API void drwav_s24_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t sampleCount) { size_t i; if (pOut == NULL || pIn == NULL) { return; } for (i = 0; i < sampleCount; ++i) { unsigned int s0 = pIn[i*3 + 0]; unsigned int s1 = pIn[i*3 + 1]; unsigned int s2 = pIn[i*3 + 2]; drwav_int32 sample32 = (drwav_int32)((s0 << 8) | (s1 << 16) | (s2 << 24)); *pOut++ = sample32; } } DRWAV_API void drwav_f32_to_s32(drwav_int32* pOut, const float* pIn, size_t sampleCount) { size_t i; if (pOut == NULL || pIn == NULL) { return; } for (i = 0; i < sampleCount; ++i) { *pOut++ = (drwav_int32)(2147483648.0 * pIn[i]); } } DRWAV_API void drwav_f64_to_s32(drwav_int32* pOut, const double* pIn, size_t sampleCount) { size_t i; if (pOut == NULL || pIn == NULL) { return; } for (i = 0; i < sampleCount; ++i) { *pOut++ = (drwav_int32)(2147483648.0 * pIn[i]); } } DRWAV_API void drwav_alaw_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t sampleCount) { size_t i; if (pOut == NULL || pIn == NULL) { return; } for (i = 0; i < sampleCount; ++i) { *pOut++ = ((drwav_int32)drwav__alaw_to_s16(pIn[i])) << 16; } } DRWAV_API void drwav_mulaw_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t sampleCount) { 
size_t i; if (pOut == NULL || pIn == NULL) { return; } for (i= 0; i < sampleCount; ++i) { *pOut++ = ((drwav_int32)drwav__mulaw_to_s16(pIn[i])) << 16; } } DRWAV_PRIVATE drwav_int16* drwav__read_pcm_frames_and_close_s16(drwav* pWav, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalFrameCount) { drwav_uint64 sampleDataSize; drwav_int16* pSampleData; drwav_uint64 framesRead; DRWAV_ASSERT(pWav != NULL); sampleDataSize = pWav->totalPCMFrameCount * pWav->channels * sizeof(drwav_int16); if (sampleDataSize > DRWAV_SIZE_MAX) { drwav_uninit(pWav); return NULL; /* File's too big. */ } pSampleData = (drwav_int16*)drwav__malloc_from_callbacks((size_t)sampleDataSize, &pWav->allocationCallbacks); /* <-- Safe cast due to the check above. */ if (pSampleData == NULL) { drwav_uninit(pWav); return NULL; /* Failed to allocate memory. */ } framesRead = drwav_read_pcm_frames_s16(pWav, (size_t)pWav->totalPCMFrameCount, pSampleData); if (framesRead != pWav->totalPCMFrameCount) { drwav__free_from_callbacks(pSampleData, &pWav->allocationCallbacks); drwav_uninit(pWav); return NULL; /* There was an error reading the samples. */ } drwav_uninit(pWav); if (sampleRate) { *sampleRate = pWav->sampleRate; } if (channels) { *channels = pWav->channels; } if (totalFrameCount) { *totalFrameCount = pWav->totalPCMFrameCount; } return pSampleData; } DRWAV_PRIVATE float* drwav__read_pcm_frames_and_close_f32(drwav* pWav, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalFrameCount) { drwav_uint64 sampleDataSize; float* pSampleData; drwav_uint64 framesRead; DRWAV_ASSERT(pWav != NULL); sampleDataSize = pWav->totalPCMFrameCount * pWav->channels * sizeof(float); if (sampleDataSize > DRWAV_SIZE_MAX) { drwav_uninit(pWav); return NULL; /* File's too big. */ } pSampleData = (float*)drwav__malloc_from_callbacks((size_t)sampleDataSize, &pWav->allocationCallbacks); /* <-- Safe cast due to the check above. 
*/ if (pSampleData == NULL) { drwav_uninit(pWav); return NULL; /* Failed to allocate memory. */ } framesRead = drwav_read_pcm_frames_f32(pWav, (size_t)pWav->totalPCMFrameCount, pSampleData); if (framesRead != pWav->totalPCMFrameCount) { drwav__free_from_callbacks(pSampleData, &pWav->allocationCallbacks); drwav_uninit(pWav); return NULL; /* There was an error reading the samples. */ } drwav_uninit(pWav); if (sampleRate) { *sampleRate = pWav->sampleRate; } if (channels) { *channels = pWav->channels; } if (totalFrameCount) { *totalFrameCount = pWav->totalPCMFrameCount; } return pSampleData; } DRWAV_PRIVATE drwav_int32* drwav__read_pcm_frames_and_close_s32(drwav* pWav, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalFrameCount) { drwav_uint64 sampleDataSize; drwav_int32* pSampleData; drwav_uint64 framesRead; DRWAV_ASSERT(pWav != NULL); sampleDataSize = pWav->totalPCMFrameCount * pWav->channels * sizeof(drwav_int32); if (sampleDataSize > DRWAV_SIZE_MAX) { drwav_uninit(pWav); return NULL; /* File's too big. */ } pSampleData = (drwav_int32*)drwav__malloc_from_callbacks((size_t)sampleDataSize, &pWav->allocationCallbacks); /* <-- Safe cast due to the check above. */ if (pSampleData == NULL) { drwav_uninit(pWav); return NULL; /* Failed to allocate memory. */ } framesRead = drwav_read_pcm_frames_s32(pWav, (size_t)pWav->totalPCMFrameCount, pSampleData); if (framesRead != pWav->totalPCMFrameCount) { drwav__free_from_callbacks(pSampleData, &pWav->allocationCallbacks); drwav_uninit(pWav); return NULL; /* There was an error reading the samples. 
*/ } drwav_uninit(pWav); if (sampleRate) { *sampleRate = pWav->sampleRate; } if (channels) { *channels = pWav->channels; } if (totalFrameCount) { *totalFrameCount = pWav->totalPCMFrameCount; } return pSampleData; } DRWAV_API drwav_int16* drwav_open_and_read_pcm_frames_s16(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks) { drwav wav; if (channelsOut) { *channelsOut = 0; } if (sampleRateOut) { *sampleRateOut = 0; } if (totalFrameCountOut) { *totalFrameCountOut = 0; } if (!drwav_init(&wav, onRead, onSeek, pUserData, pAllocationCallbacks)) { return NULL; } return drwav__read_pcm_frames_and_close_s16(&wav, channelsOut, sampleRateOut, totalFrameCountOut); } DRWAV_API float* drwav_open_and_read_pcm_frames_f32(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks) { drwav wav; if (channelsOut) { *channelsOut = 0; } if (sampleRateOut) { *sampleRateOut = 0; } if (totalFrameCountOut) { *totalFrameCountOut = 0; } if (!drwav_init(&wav, onRead, onSeek, pUserData, pAllocationCallbacks)) { return NULL; } return drwav__read_pcm_frames_and_close_f32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); } DRWAV_API drwav_int32* drwav_open_and_read_pcm_frames_s32(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks) { drwav wav; if (channelsOut) { *channelsOut = 0; } if (sampleRateOut) { *sampleRateOut = 0; } if (totalFrameCountOut) { *totalFrameCountOut = 0; } if (!drwav_init(&wav, onRead, onSeek, pUserData, pAllocationCallbacks)) { return NULL; } return drwav__read_pcm_frames_and_close_s32(&wav, channelsOut, 
sampleRateOut, totalFrameCountOut); } #ifndef DR_WAV_NO_STDIO DRWAV_API drwav_int16* drwav_open_file_and_read_pcm_frames_s16(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks) { drwav wav; if (channelsOut) { *channelsOut = 0; } if (sampleRateOut) { *sampleRateOut = 0; } if (totalFrameCountOut) { *totalFrameCountOut = 0; } if (!drwav_init_file(&wav, filename, pAllocationCallbacks)) { return NULL; } return drwav__read_pcm_frames_and_close_s16(&wav, channelsOut, sampleRateOut, totalFrameCountOut); } DRWAV_API float* drwav_open_file_and_read_pcm_frames_f32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks) { drwav wav; if (channelsOut) { *channelsOut = 0; } if (sampleRateOut) { *sampleRateOut = 0; } if (totalFrameCountOut) { *totalFrameCountOut = 0; } if (!drwav_init_file(&wav, filename, pAllocationCallbacks)) { return NULL; } return drwav__read_pcm_frames_and_close_f32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); } DRWAV_API drwav_int32* drwav_open_file_and_read_pcm_frames_s32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks) { drwav wav; if (channelsOut) { *channelsOut = 0; } if (sampleRateOut) { *sampleRateOut = 0; } if (totalFrameCountOut) { *totalFrameCountOut = 0; } if (!drwav_init_file(&wav, filename, pAllocationCallbacks)) { return NULL; } return drwav__read_pcm_frames_and_close_s32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); } #ifndef DR_WAV_NO_WCHAR DRWAV_API drwav_int16* drwav_open_file_and_read_pcm_frames_s16_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks) { drwav wav; 
if (sampleRateOut) { *sampleRateOut = 0; } if (channelsOut) { *channelsOut = 0; } if (totalFrameCountOut) { *totalFrameCountOut = 0; } if (!drwav_init_file_w(&wav, filename, pAllocationCallbacks)) { return NULL; } return drwav__read_pcm_frames_and_close_s16(&wav, channelsOut, sampleRateOut, totalFrameCountOut); } DRWAV_API float* drwav_open_file_and_read_pcm_frames_f32_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks) { drwav wav; if (sampleRateOut) { *sampleRateOut = 0; } if (channelsOut) { *channelsOut = 0; } if (totalFrameCountOut) { *totalFrameCountOut = 0; } if (!drwav_init_file_w(&wav, filename, pAllocationCallbacks)) { return NULL; } return drwav__read_pcm_frames_and_close_f32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); } DRWAV_API drwav_int32* drwav_open_file_and_read_pcm_frames_s32_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks) { drwav wav; if (sampleRateOut) { *sampleRateOut = 0; } if (channelsOut) { *channelsOut = 0; } if (totalFrameCountOut) { *totalFrameCountOut = 0; } if (!drwav_init_file_w(&wav, filename, pAllocationCallbacks)) { return NULL; } return drwav__read_pcm_frames_and_close_s32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); } #endif /* DR_WAV_NO_WCHAR */ #endif /* DR_WAV_NO_STDIO */ DRWAV_API drwav_int16* drwav_open_memory_and_read_pcm_frames_s16(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks) { drwav wav; if (channelsOut) { *channelsOut = 0; } if (sampleRateOut) { *sampleRateOut = 0; } if (totalFrameCountOut) { *totalFrameCountOut = 0; } if (!drwav_init_memory(&wav, data, dataSize, pAllocationCallbacks)) { return NULL; } return 
drwav__read_pcm_frames_and_close_s16(&wav, channelsOut, sampleRateOut, totalFrameCountOut); } DRWAV_API float* drwav_open_memory_and_read_pcm_frames_f32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks) { drwav wav; if (channelsOut) { *channelsOut = 0; } if (sampleRateOut) { *sampleRateOut = 0; } if (totalFrameCountOut) { *totalFrameCountOut = 0; } if (!drwav_init_memory(&wav, data, dataSize, pAllocationCallbacks)) { return NULL; } return drwav__read_pcm_frames_and_close_f32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); } DRWAV_API drwav_int32* drwav_open_memory_and_read_pcm_frames_s32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks) { drwav wav; if (channelsOut) { *channelsOut = 0; } if (sampleRateOut) { *sampleRateOut = 0; } if (totalFrameCountOut) { *totalFrameCountOut = 0; } if (!drwav_init_memory(&wav, data, dataSize, pAllocationCallbacks)) { return NULL; } return drwav__read_pcm_frames_and_close_s32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); } #endif /* DR_WAV_NO_CONVERSION_API */ DRWAV_API void drwav_free(void* p, const drwav_allocation_callbacks* pAllocationCallbacks) { if (pAllocationCallbacks != NULL) { drwav__free_from_callbacks(p, pAllocationCallbacks); } else { drwav__free_default(p, NULL); } } DRWAV_API drwav_uint16 drwav_bytes_to_u16(const drwav_uint8* data) { return ((drwav_uint16)data[0] << 0) | ((drwav_uint16)data[1] << 8); } DRWAV_API drwav_int16 drwav_bytes_to_s16(const drwav_uint8* data) { return (drwav_int16)drwav_bytes_to_u16(data); } DRWAV_API drwav_uint32 drwav_bytes_to_u32(const drwav_uint8* data) { return drwav_bytes_to_u32_le(data); } DRWAV_API float drwav_bytes_to_f32(const drwav_uint8* data) { union { drwav_uint32 u32; float f32; } value; value.u32 = 
drwav_bytes_to_u32(data); return value.f32; } DRWAV_API drwav_int32 drwav_bytes_to_s32(const drwav_uint8* data) { return (drwav_int32)drwav_bytes_to_u32(data); } DRWAV_API drwav_uint64 drwav_bytes_to_u64(const drwav_uint8* data) { return ((drwav_uint64)data[0] << 0) | ((drwav_uint64)data[1] << 8) | ((drwav_uint64)data[2] << 16) | ((drwav_uint64)data[3] << 24) | ((drwav_uint64)data[4] << 32) | ((drwav_uint64)data[5] << 40) | ((drwav_uint64)data[6] << 48) | ((drwav_uint64)data[7] << 56); } DRWAV_API drwav_int64 drwav_bytes_to_s64(const drwav_uint8* data) { return (drwav_int64)drwav_bytes_to_u64(data); } DRWAV_API drwav_bool32 drwav_guid_equal(const drwav_uint8 a[16], const drwav_uint8 b[16]) { int i; for (i = 0; i < 16; i += 1) { if (a[i] != b[i]) { return DRWAV_FALSE; } } return DRWAV_TRUE; } DRWAV_API drwav_bool32 drwav_fourcc_equal(const drwav_uint8* a, const char* b) { return a[0] == b[0] && a[1] == b[1] && a[2] == b[2] && a[3] == b[3]; } #ifdef __MRC__ /* Undo the pragma at the beginning of this file. */ #pragma options opt reset #endif #endif /* dr_wav_c */ #endif /* DR_WAV_IMPLEMENTATION */ /* REVISION HISTORY ================ v0.13.14 - 2023-12-02 - Fix a warning about an unused variable. v0.13.13 - 2023-11-02 - Fix a warning when compiling with Clang. v0.13.12 - 2023-08-07 - Fix a possible crash in drwav_read_pcm_frames(). v0.13.11 - 2023-07-07 - AIFF compatibility improvements. v0.13.10 - 2023-05-29 - Fix a bug where drwav_init_with_metadata() does not decode any frames after initializtion. v0.13.9 - 2023-05-22 - Add support for AIFF decoding (writing and metadata not supported). - Add support for RIFX decoding (writing and metadata not supported). - Fix a bug where metadata is not processed if it's located before the "fmt " chunk. - Add a workaround for a type of malformed WAV file where the size of the "RIFF" and "data" chunks are incorrectly set to 0xFFFFFFFF. v0.13.8 - 2023-03-25 - Fix a possible null pointer dereference. 
- Fix a crash when loading files with badly formed metadata. v0.13.7 - 2022-09-17 - Fix compilation with DJGPP. - Add support for disabling wchar_t with DR_WAV_NO_WCHAR. v0.13.6 - 2022-04-10 - Fix compilation error on older versions of GCC. - Remove some dependencies on the standard library. v0.13.5 - 2022-01-26 - Fix an error when seeking to the end of the file. v0.13.4 - 2021-12-08 - Fix some static analysis warnings. v0.13.3 - 2021-11-24 - Fix an incorrect assertion when trying to endian swap 1-byte sample formats. This is now a no-op rather than a failed assertion. - Fix a bug with parsing of the bext chunk. - Fix some static analysis warnings. v0.13.2 - 2021-10-02 - Fix a possible buffer overflow when reading from compressed formats. v0.13.1 - 2021-07-31 - Fix platform detection for ARM64. v0.13.0 - 2021-07-01 - Improve support for reading and writing metadata. Use the `_with_metadata()` APIs to initialize a WAV decoder and store the metadata within the `drwav` object. Use the `pMetadata` and `metadataCount` members of the `drwav` object to read the data. The old way of handling metadata via a callback is still usable and valid. - API CHANGE: drwav_target_write_size_bytes() now takes extra parameters for calculating the required write size when writing metadata. - Add drwav_get_cursor_in_pcm_frames() - Add drwav_get_length_in_pcm_frames() - Fix a bug where drwav_read_raw() can call the read callback with a byte count of zero. v0.12.20 - 2021-06-11 - Fix some undefined behavior. v0.12.19 - 2021-02-21 - Fix a warning due to referencing _MSC_VER when it is undefined. - Minor improvements to the management of some internal state concerning the data chunk cursor. v0.12.18 - 2021-01-31 - Clean up some static analysis warnings. v0.12.17 - 2021-01-17 - Minor fix to sample code in documentation. - Correctly qualify a private API as private rather than public. - Code cleanup. v0.12.16 - 2020-12-02 - Fix a bug when trying to read more bytes than can fit in a size_t. 
v0.12.15 - 2020-11-21 - Fix compilation with OpenWatcom. v0.12.14 - 2020-11-13 - Minor code clean up. v0.12.13 - 2020-11-01 - Improve compiler support for older versions of GCC. v0.12.12 - 2020-09-28 - Add support for RF64. - Fix a bug in writing mode where the size of the RIFF chunk incorrectly includes the header section. v0.12.11 - 2020-09-08 - Fix a compilation error on older compilers. v0.12.10 - 2020-08-24 - Fix a bug when seeking with ADPCM formats. v0.12.9 - 2020-08-02 - Simplify sized types. v0.12.8 - 2020-07-25 - Fix a compilation warning. v0.12.7 - 2020-07-15 - Fix some bugs on big-endian architectures. - Fix an error in s24 to f32 conversion. v0.12.6 - 2020-06-23 - Change drwav_read_*() to allow NULL to be passed in as the output buffer which is equivalent to a forward seek. - Fix a buffer overflow when trying to decode invalid IMA-ADPCM files. - Add include guard for the implementation section. v0.12.5 - 2020-05-27 - Minor documentation fix. v0.12.4 - 2020-05-16 - Replace assert() with DRWAV_ASSERT(). - Add compile-time and run-time version querying. - DRWAV_VERSION_MINOR - DRWAV_VERSION_MAJOR - DRWAV_VERSION_REVISION - DRWAV_VERSION_STRING - drwav_version() - drwav_version_string() v0.12.3 - 2020-04-30 - Fix compilation errors with VC6. v0.12.2 - 2020-04-21 - Fix a bug where drwav_init_file() does not close the file handle after attempting to load an erroneous file. v0.12.1 - 2020-04-13 - Fix some pedantic warnings. v0.12.0 - 2020-04-04 - API CHANGE: Add container and format parameters to the chunk callback. - Minor documentation updates. v0.11.5 - 2020-03-07 - Fix compilation error with Visual Studio .NET 2003. v0.11.4 - 2020-01-29 - Fix some static analysis warnings. - Fix a bug when reading f32 samples from an A-law encoded stream. v0.11.3 - 2020-01-12 - Minor changes to some f32 format conversion routines. - Minor bug fix for ADPCM conversion when end of file is reached. 
v0.11.2 - 2019-12-02 - Fix a possible crash when using custom memory allocators without a custom realloc() implementation. - Fix an integer overflow bug. - Fix a null pointer dereference bug. - Add limits to sample rate, channels and bits per sample to tighten up some validation. v0.11.1 - 2019-10-07 - Internal code clean up. v0.11.0 - 2019-10-06 - API CHANGE: Add support for user defined memory allocation routines. This system allows the program to specify their own memory allocation routines with a user data pointer for client-specific contextual data. This adds an extra parameter to the end of the following APIs: - drwav_init() - drwav_init_ex() - drwav_init_file() - drwav_init_file_ex() - drwav_init_file_w() - drwav_init_file_w_ex() - drwav_init_memory() - drwav_init_memory_ex() - drwav_init_write() - drwav_init_write_sequential() - drwav_init_write_sequential_pcm_frames() - drwav_init_file_write() - drwav_init_file_write_sequential() - drwav_init_file_write_sequential_pcm_frames() - drwav_init_file_write_w() - drwav_init_file_write_sequential_w() - drwav_init_file_write_sequential_pcm_frames_w() - drwav_init_memory_write() - drwav_init_memory_write_sequential() - drwav_init_memory_write_sequential_pcm_frames() - drwav_open_and_read_pcm_frames_s16() - drwav_open_and_read_pcm_frames_f32() - drwav_open_and_read_pcm_frames_s32() - drwav_open_file_and_read_pcm_frames_s16() - drwav_open_file_and_read_pcm_frames_f32() - drwav_open_file_and_read_pcm_frames_s32() - drwav_open_file_and_read_pcm_frames_s16_w() - drwav_open_file_and_read_pcm_frames_f32_w() - drwav_open_file_and_read_pcm_frames_s32_w() - drwav_open_memory_and_read_pcm_frames_s16() - drwav_open_memory_and_read_pcm_frames_f32() - drwav_open_memory_and_read_pcm_frames_s32() Set this extra parameter to NULL to use defaults which is the same as the previous behaviour. Setting this NULL will use DRWAV_MALLOC, DRWAV_REALLOC and DRWAV_FREE. 
- Add support for reading and writing PCM frames in an explicit endianness. New APIs: - drwav_read_pcm_frames_le() - drwav_read_pcm_frames_be() - drwav_read_pcm_frames_s16le() - drwav_read_pcm_frames_s16be() - drwav_read_pcm_frames_f32le() - drwav_read_pcm_frames_f32be() - drwav_read_pcm_frames_s32le() - drwav_read_pcm_frames_s32be() - drwav_write_pcm_frames_le() - drwav_write_pcm_frames_be() - Remove deprecated APIs. - API CHANGE: The following APIs now return native-endian data. Previously they returned little-endian data. - drwav_read_pcm_frames() - drwav_read_pcm_frames_s16() - drwav_read_pcm_frames_s32() - drwav_read_pcm_frames_f32() - drwav_open_and_read_pcm_frames_s16() - drwav_open_and_read_pcm_frames_s32() - drwav_open_and_read_pcm_frames_f32() - drwav_open_file_and_read_pcm_frames_s16() - drwav_open_file_and_read_pcm_frames_s32() - drwav_open_file_and_read_pcm_frames_f32() - drwav_open_file_and_read_pcm_frames_s16_w() - drwav_open_file_and_read_pcm_frames_s32_w() - drwav_open_file_and_read_pcm_frames_f32_w() - drwav_open_memory_and_read_pcm_frames_s16() - drwav_open_memory_and_read_pcm_frames_s32() - drwav_open_memory_and_read_pcm_frames_f32() v0.10.1 - 2019-08-31 - Correctly handle partial trailing ADPCM blocks. v0.10.0 - 2019-08-04 - Remove deprecated APIs. - Add wchar_t variants for file loading APIs: drwav_init_file_w() drwav_init_file_ex_w() drwav_init_file_write_w() drwav_init_file_write_sequential_w() - Add drwav_target_write_size_bytes() which calculates the total size in bytes of a WAV file given a format and sample count. 
- Add APIs for specifying the PCM frame count instead of the sample count when opening in sequential write mode: drwav_init_write_sequential_pcm_frames() drwav_init_file_write_sequential_pcm_frames() drwav_init_file_write_sequential_pcm_frames_w() drwav_init_memory_write_sequential_pcm_frames() - Deprecate drwav_open*() and drwav_close(): drwav_open() drwav_open_ex() drwav_open_write() drwav_open_write_sequential() drwav_open_file() drwav_open_file_ex() drwav_open_file_write() drwav_open_file_write_sequential() drwav_open_memory() drwav_open_memory_ex() drwav_open_memory_write() drwav_open_memory_write_sequential() drwav_close() - Minor documentation updates. v0.9.2 - 2019-05-21 - Fix warnings. v0.9.1 - 2019-05-05 - Add support for C89. - Change license to choice of public domain or MIT-0. v0.9.0 - 2018-12-16 - API CHANGE: Add new reading APIs for reading by PCM frames instead of samples. Old APIs have been deprecated and will be removed in v0.10.0. Deprecated APIs and their replacements: drwav_read() -> drwav_read_pcm_frames() drwav_read_s16() -> drwav_read_pcm_frames_s16() drwav_read_f32() -> drwav_read_pcm_frames_f32() drwav_read_s32() -> drwav_read_pcm_frames_s32() drwav_seek_to_sample() -> drwav_seek_to_pcm_frame() drwav_write() -> drwav_write_pcm_frames() drwav_open_and_read_s16() -> drwav_open_and_read_pcm_frames_s16() drwav_open_and_read_f32() -> drwav_open_and_read_pcm_frames_f32() drwav_open_and_read_s32() -> drwav_open_and_read_pcm_frames_s32() drwav_open_file_and_read_s16() -> drwav_open_file_and_read_pcm_frames_s16() drwav_open_file_and_read_f32() -> drwav_open_file_and_read_pcm_frames_f32() drwav_open_file_and_read_s32() -> drwav_open_file_and_read_pcm_frames_s32() drwav_open_memory_and_read_s16() -> drwav_open_memory_and_read_pcm_frames_s16() drwav_open_memory_and_read_f32() -> drwav_open_memory_and_read_pcm_frames_f32() drwav_open_memory_and_read_s32() -> drwav_open_memory_and_read_pcm_frames_s32() drwav::totalSampleCount -> 
drwav::totalPCMFrameCount - API CHANGE: Rename drwav_open_and_read_file_*() to drwav_open_file_and_read_*(). - API CHANGE: Rename drwav_open_and_read_memory_*() to drwav_open_memory_and_read_*(). - Add built-in support for smpl chunks. - Add support for firing a callback for each chunk in the file at initialization time. - This is enabled through the drwav_init_ex(), etc. family of APIs. - Handle invalid FMT chunks more robustly. v0.8.5 - 2018-09-11 - Const correctness. - Fix a potential stack overflow. v0.8.4 - 2018-08-07 - Improve 64-bit detection. v0.8.3 - 2018-08-05 - Fix C++ build on older versions of GCC. v0.8.2 - 2018-08-02 - Fix some big-endian bugs. v0.8.1 - 2018-06-29 - Add support for sequential writing APIs. - Disable seeking in write mode. - Fix bugs with Wave64. - Fix typos. v0.8 - 2018-04-27 - Bug fix. - Start using major.minor.revision versioning. v0.7f - 2018-02-05 - Restrict ADPCM formats to a maximum of 2 channels. v0.7e - 2018-02-02 - Fix a crash. v0.7d - 2018-02-01 - Fix a crash. v0.7c - 2018-02-01 - Set drwav.bytesPerSample to 0 for all compressed formats. - Fix a crash when reading 16-bit floating point WAV files. In this case dr_wav will output silence for all format conversion reading APIs (*_s16, *_s32, *_f32 APIs). - Fix some divide-by-zero errors. v0.7b - 2018-01-22 - Fix errors with seeking of compressed formats. - Fix compilation error when DR_WAV_NO_CONVERSION_API v0.7a - 2017-11-17 - Fix some GCC warnings. v0.7 - 2017-11-04 - Add writing APIs. v0.6 - 2017-08-16 - API CHANGE: Rename dr_* types to drwav_*. - Add support for custom implementations of malloc(), realloc(), etc. - Add support for Microsoft ADPCM. - Add support for IMA ADPCM (DVI, format code 0x11). - Optimizations to drwav_read_s16(). - Bug fixes. v0.5g - 2017-07-16 - Change underlying type for booleans to unsigned. v0.5f - 2017-04-04 - Fix a minor bug with drwav_open_and_read_s16() and family. 
v0.5e - 2016-12-29 - Added support for reading samples as signed 16-bit integers. Use the _s16() family of APIs for this. - Minor fixes to documentation. v0.5d - 2016-12-28 - Use drwav_int* and drwav_uint* sized types to improve compiler support. v0.5c - 2016-11-11 - Properly handle JUNK chunks that come before the FMT chunk. v0.5b - 2016-10-23 - A minor change to drwav_bool8 and drwav_bool32 types. v0.5a - 2016-10-11 - Fixed a bug with drwav_open_and_read() and family due to incorrect argument ordering. - Improve A-law and mu-law efficiency. v0.5 - 2016-09-29 - API CHANGE. Swap the order of "channels" and "sampleRate" parameters in drwav_open_and_read*(). Rationale for this is to keep it consistent with dr_audio and dr_flac. v0.4b - 2016-09-18 - Fixed a typo in documentation. v0.4a - 2016-09-18 - Fixed a typo. - Change date format to ISO 8601 (YYYY-MM-DD) v0.4 - 2016-07-13 - API CHANGE. Make onSeek consistent with dr_flac. - API CHANGE. Rename drwav_seek() to drwav_seek_to_sample() for clarity and consistency with dr_flac. - Added support for Sony Wave64. v0.3a - 2016-05-28 - API CHANGE. Return drwav_bool32 instead of int in onSeek callback. - Fixed a memory leak. v0.3 - 2016-05-22 - Lots of API changes for consistency. v0.2a - 2016-05-16 - Fixed Linux/GCC build. v0.2 - 2016-05-11 - Added support for reading data as signed 32-bit PCM for consistency with dr_flac. v0.1a - 2016-05-07 - Fixed a bug in drwav_open_file() where the file handle would not be closed if the loader failed to initialize. v0.1 - 2016-05-04 - Initial versioned release. */ /* This software is available as a choice of the following licenses. Choose whichever you prefer. =============================================================================== ALTERNATIVE 1 - Public Domain (www.unlicense.org) =============================================================================== This is free and unencumbered software released into the public domain. 
Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more information, please refer to <http://unlicense.org/> =============================================================================== ALTERNATIVE 2 - MIT No Attribution =============================================================================== Copyright 2023 David Reid Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
351,403
C++
.h
7,168
41.52567
279
0.65378
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,826
numeric_range_trie.h
typesense_typesense/include/numeric_range_trie.h
#pragma once #include <ids_t.h> constexpr short EXPANSE = 256; class NumericTrie { char max_level = 4; class Node { Node** children = nullptr; void* seq_ids = SET_COMPACT_IDS(compact_id_list_t::create(0, {})); void insert_helper(const int64_t& value, const uint32_t& seq_id, char& level, const char& max_level); void insert_geopoint_helper(const uint64_t& cell_id, const uint32_t& seq_id, char& level, const char& max_level); void search_geopoints_helper(const uint64_t& cell_id, const char& max_index_level, std::set<Node*>& matches); void search_range_helper(const int64_t& low,const int64_t& high, const char& max_level, std::vector<Node*>& matches); void search_less_than_helper(const int64_t& value, char& level, const char& max_level, std::vector<Node*>& matches); void search_greater_than_helper(const int64_t& value, char& level, const char& max_level, std::vector<Node*>& matches); void seq_ids_outside_top_k_helper(const size_t& k, size_t& ids_skipped, char& level, const char& max_level, const bool& is_negative, std::vector<uint32_t>& result); public: ~Node() { ids_t::destroy_list(seq_ids); if (children != nullptr) { for (auto i = 0; i < EXPANSE; i++) { delete children[i]; } } delete [] children; } void insert(const int64_t& cell_id, const uint32_t& seq_id, const char& max_level); void remove(const int64_t& cell_id, const uint32_t& seq_id, const char& max_level); void insert_geopoint(const uint64_t& cell_id, const uint32_t& seq_id, const char& max_level); void search_geopoints(const std::vector<uint64_t>& cell_ids, const char& max_level, std::vector<uint32_t>& geo_result_ids); void delete_geopoint(const uint64_t& cell_id, uint32_t id, const char& max_level); void get_all_ids(uint32_t*& ids, uint32_t& ids_length); void get_all_ids(std::vector<uint32_t>& result); uint32_t get_ids_length(); void search_range(const int64_t& low, const int64_t& high, const char& max_level, uint32_t*& ids, uint32_t& ids_length); void search_range(const int64_t& low, const int64_t& high, 
const char& max_level, std::vector<Node*>& matches); void search_less_than(const int64_t& value, const char& max_level, uint32_t*& ids, uint32_t& ids_length); void search_less_than(const int64_t& value, const char& max_level, std::vector<Node*>& matches); void search_greater_than(const int64_t& value, const char& max_level, uint32_t*& ids, uint32_t& ids_length); void search_greater_than(const int64_t& value, const char& max_level, std::vector<Node*>& matches); void search_equal_to(const int64_t& value, const char& max_level, uint32_t*& ids, uint32_t& ids_length); void search_equal_to(const int64_t& value, const char& max_level, std::vector<Node*>& matches); void seq_ids_outside_top_k(const size_t& k, const char& max_level, size_t& ids_skipped, std::vector<uint32_t>& result, const bool& is_negative = false); }; Node* negative_trie = nullptr; Node* positive_trie = nullptr; public: explicit NumericTrie(char num_bits = 32) { max_level = num_bits / 8; } ~NumericTrie() { delete negative_trie; delete positive_trie; } class iterator_t { struct match_state { uint32_t* ids = nullptr; uint32_t ids_length = 0; uint32_t index = 0; explicit match_state(uint32_t*& ids, uint32_t& ids_length) : ids(ids), ids_length(ids_length) {} ~match_state() { delete [] ids; } }; std::vector<match_state*> matches; void set_seq_id(); public: explicit iterator_t(std::vector<Node*>& matches); ~iterator_t() { for (auto& match: matches) { delete match; } } iterator_t& operator=(iterator_t&& obj) noexcept; uint32_t seq_id = 0; bool is_valid = true; void next(); void skip_to(uint32_t id); void reset(); }; void insert(const int64_t& value, const uint32_t& seq_id); void remove(const int64_t& value, const uint32_t& seq_id); void insert_geopoint(const uint64_t& cell_id, const uint32_t& seq_id); void search_geopoints(const std::vector<uint64_t>& cell_ids, std::vector<uint32_t>& geo_result_ids); void delete_geopoint(const uint64_t& cell_id, uint32_t id); void search_range(const int64_t& low, const bool& 
low_inclusive, const int64_t& high, const bool& high_inclusive, uint32_t*& ids, uint32_t& ids_length); iterator_t search_range(const int64_t& low, const bool& low_inclusive, const int64_t& high, const bool& high_inclusive); void search_less_than(const int64_t& value, const bool& inclusive, uint32_t*& ids, uint32_t& ids_length); iterator_t search_less_than(const int64_t& value, const bool& inclusive); void search_greater_than(const int64_t& value, const bool& inclusive, uint32_t*& ids, uint32_t& ids_length); iterator_t search_greater_than(const int64_t& value, const bool& inclusive); void search_equal_to(const int64_t& value, uint32_t*& ids, uint32_t& ids_length); iterator_t search_equal_to(const int64_t& value); void seq_ids_outside_top_k(const size_t& k, std::vector<uint32_t>& result); size_t size(); };
5,931
C++
.h
107
43.925234
121
0.601007
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,827
conversation_model_manager.h
typesense_typesense/include/conversation_model_manager.h
#pragma once #include <shared_mutex> #include <mutex> #include <unordered_map> #include <json.hpp> #include <option.h> #include "store.h" #include "sole.hpp" #include "collection.h" class ConversationModelManager { public: ConversationModelManager() = delete; ConversationModelManager(const ConversationModelManager&) = delete; ConversationModelManager(ConversationModelManager&&) = delete; ConversationModelManager& operator=(const ConversationModelManager&) = delete; static Option<nlohmann::json> get_model(const std::string& model_id); static Option<bool> add_model(nlohmann::json& model, const std::string& model_id, const bool write_to_disk); static Option<nlohmann::json> delete_model(const std::string& model_id); static Option<nlohmann::json> update_model(const std::string& model_id, nlohmann::json model); static Option<nlohmann::json> get_all_models(); static Option<int> init(Store* store); static bool migrate_model(nlohmann::json& model); static std::unordered_set<std::string> get_history_collections(); // For testing Purpose only static void insert_model_for_testing(const std::string& model_id, nlohmann::json model) { std::unique_lock lock(models_mutex); models[model_id] = model; } private: static inline std::unordered_map<std::string, nlohmann::json> models; static inline std::shared_mutex models_mutex; static constexpr char* MODEL_NEXT_ID = "$CVMN"; static constexpr char* MODEL_KEY_PREFIX = "$CVMP"; static inline int64_t DEFAULT_HISTORY_COLLECTION_SUFFIX = 0; static inline Store* store; static const std::string get_model_key(const std::string& model_id); static Option<Collection*> create_default_history_collection(const std::string& model_id); static Option<nlohmann::json> delete_model_unsafe(const std::string& model_id); };
2,026
C++
.h
41
41.902439
102
0.686017
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,828
store.h
typesense_typesense/include/store.h
#pragma once #include <stdint.h> #include <cstdlib> #include <string> #include <sstream> #include <memory> #include <mutex> #include <thread> #include <shared_mutex> #include <option.h> #include <rocksdb/db.h> #include <rocksdb/write_batch.h> #include <rocksdb/options.h> #include <rocksdb/merge_operator.h> #include <rocksdb/transaction_log.h> #include <butil/file_util.h> #include <mutex> #include <rocksdb/utilities/checkpoint.h> #include <rocksdb/utilities/table_properties_collectors.h> #include "string_utils.h" #include "logger.h" #include "file_utils.h" #include <rocksdb/utilities/db_ttl.h> #define FOURWEEKS_SECS 2419200 class UInt64AddOperator : public rocksdb::AssociativeMergeOperator { public: virtual bool Merge(const rocksdb::Slice& key, const rocksdb::Slice* existing_value, const rocksdb::Slice& value, std::string* new_value, rocksdb::Logger* logger) const override { uint64_t existing = 0; if (existing_value) { existing = StringUtils::deserialize_uint32_t(existing_value->ToString()); } *new_value = StringUtils::serialize_uint32_t(existing + StringUtils::deserialize_uint32_t(value.ToString())); return true; } virtual const char* Name() const override { return "UInt64AddOperator"; } }; enum StoreStatus { FOUND, NOT_FOUND, ERROR }; /* * Abstraction for underlying KV store (RocksDB) */ class Store { private: const std::string state_dir_path; rocksdb::DB *db; rocksdb::Options options; rocksdb::WriteOptions write_options; // Used to protect assignment to DB handle, which is otherwise thread safe // So we use unique lock only for assignment, but shared locks for all other operations on DB mutable std::shared_mutex mutex; rocksdb::Status init_db(int32_t ttl); public: Store() = delete; Store(const std::string & state_dir_path, const size_t wal_ttl_secs = 24*60*60, const size_t wal_size_mb = 1024, bool disable_wal = true, int32_t ttl=0); ~Store(); bool insert(const std::string& key, const std::string& value); bool batch_write(rocksdb::WriteBatch& batch); bool 
contains(const std::string& key) const; StoreStatus get(const std::string& key, std::string& value) const; bool remove(const std::string& key); rocksdb::Iterator* scan(const std::string & prefix, const rocksdb::Slice* iterate_upper_bound); rocksdb::Iterator* get_iterator(); void scan_fill(const std::string& prefix_start, const std::string& prefix_end, std::vector<std::string> & values); void increment(const std::string & key, uint32_t value); uint64_t get_latest_seq_number() const; Option<std::vector<std::string>*> get_updates_since(const uint64_t seq_number_org, const uint64_t max_updates) const; void close(); int reload(bool clear_state_dir, const std::string& snapshot_path, int32_t ttl = 0); void flush(); rocksdb::Status compact_all(); rocksdb::Status create_check_point(rocksdb::Checkpoint** checkpoint_ptr, const std::string& db_snapshot_path); rocksdb::Status delete_range(const std::string& begin_key, const std::string& end_key); rocksdb::Status compact_range(const rocksdb::Slice& begin_key, const rocksdb::Slice& end_key); // Only for internal tests rocksdb::DB* _get_db_unsafe() const; const std::string& get_state_dir_path() const; const rocksdb::Options &get_db_options() const; void print_memory_usage(); void get_last_N_values(const std::string& userid_prefix, uint32_t N, std::vector<std::string>& values); };
3,656
C++
.h
90
35.977778
121
0.705183
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,829
vector_query_ops.h
typesense_typesense/include/vector_query_ops.h
#pragma once #include <string> #include <vector> #include "option.h" #include <limits> class Collection; struct vector_query_t { std::string field_name; size_t k = 0; size_t flat_search_cutoff = 0; float distance_threshold = std::numeric_limits<float>::max(); std::vector<float> values; uint32_t seq_id = 0; bool query_doc_given = false; float alpha = 0.3; uint32_t ef = 10; std::vector<std::string> queries; std::vector<float> query_weights; void _reset() { // used for testing only field_name.clear(); k = 0; distance_threshold = 2.01; values.clear(); seq_id = 0; query_doc_given = false; } }; class VectorQueryOps { public: static Option<bool> parse_vector_query_str(const std::string& vector_query_str, vector_query_t& vector_query, const bool is_wildcard_query, const Collection* coll, const bool allow_empty_query); };
1,085
C++
.h
35
22.571429
113
0.569511
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,830
wyhash_v5.h
typesense_typesense/include/wyhash_v5.h
// Author: Wang Yi <godspeed_china@yeah.net> #ifndef wyhash_version_5 #define wyhash_version_5 #include <stdint.h> #include <string.h> #if defined(_MSC_VER) && defined(_M_X64) #include <intrin.h> #pragma intrinsic(_umul128) #endif #if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__) #define _likely_(x) __builtin_expect(x,1) #else #define _likely_(x) (x) #endif const uint64_t _wyp[6]={0xa0761d6478bd642full,0xe7037ed1a0b428dbull,0x8ebc6af09c88c6e3ull,0x589965cc75374cc3ull,0x1d8e4e27c47d124full,0x72b22b96e169b471ull};//default secret static inline uint64_t _wyrotr(uint64_t v, unsigned k){ return (v>>k)|(v<<(64-k)); } static inline uint64_t _wymum(uint64_t A, uint64_t B){ #ifdef UNOFFICIAL_WYHASH_32BIT // fast on 32 bit system uint64_t hh=(A>>32)*(B>>32), hl=(A>>32)*(unsigned)B, lh=(unsigned)A*(B>>32), ll=(uint64_t)(unsigned)A*(unsigned)B; return _wyrotr(hl,32)^_wyrotr(lh,32)^hh^ll; #else #ifdef __SIZEOF_INT128__ __uint128_t r=A; r*=B; return (r>>64)^r; #elif defined(_MSC_VER) && defined(_M_X64) A=_umul128(A, B, &B); return A^B; #else uint64_t ha=A>>32, hb=B>>32, la=(uint32_t)A, lb=(uint32_t)B, hi, lo; uint64_t rh=ha*hb, rm0=ha*lb, rm1=hb*la, rl=la*lb, t=rl+(rm0<<32), c=t<rl; lo=t+(rm1<<32); c+=lo<t; hi=rh+(rm0>>32)+(rm1>>32)+c; return hi^lo; #endif #endif } static inline uint64_t _wymix(uint64_t A, uint64_t B){ #ifdef UNOFFICIAL_WYHASH_FAST //lose entropy with probability 2^-66 per byte return _wymum(A,B); #else return A^B^_wymum(A,B); #endif } static inline uint64_t wyrand(uint64_t *seed){ *seed+=_wyp[0]; return _wymum(*seed^_wyp[1],*seed); } static inline double wy2u01(uint64_t r){ const double _wynorm=1.0/(1ull<<52); return (r>>11)*_wynorm; } static inline double wy2gau(uint64_t r){ const double _wynorm=1.0/(1ull<<20); return ((r&0x1fffff)+((r>>21)&0x1fffff)+((r>>42)&0x1fffff))*_wynorm-3.0; } #ifndef WYHASH_LITTLE_ENDIAN #if defined(_WIN32) || defined(__LITTLE_ENDIAN__) || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) 
#define WYHASH_LITTLE_ENDIAN 1 #elif defined(__BIG_ENDIAN__) || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) #define WYHASH_LITTLE_ENDIAN 0 #endif #endif #if(WYHASH_LITTLE_ENDIAN) static inline uint64_t _wyr8(const uint8_t *p){ uint64_t v; memcpy(&v, p, 8); return v; } static inline uint64_t _wyr4(const uint8_t *p){ unsigned v; memcpy(&v, p, 4); return v; } #else #if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__) static inline uint64_t _wyr8(const uint8_t *p){ uint64_t v; memcpy(&v, p, 8); return __builtin_bswap64(v); } static inline uint64_t _wyr4(const uint8_t *p){ unsigned v; memcpy(&v, p, 4); return __builtin_bswap32(v); } #elif defined(_MSC_VER) static inline uint64_t _wyr8(const uint8_t *p){ uint64_t v; memcpy(&v, p, 8); return _byteswap_uint64(v);} static inline uint64_t _wyr4(const uint8_t *p){ unsigned v; memcpy(&v, p, 4); return _byteswap_ulong(v); } #endif #endif static inline uint64_t _wyr3(const uint8_t *p, unsigned k){ return (((uint64_t)p[0])<<16)|(((uint64_t)p[k>>1])<<8)|p[k-1]; } static inline uint64_t FastestHash(const void *key, size_t len, uint64_t seed){ const uint8_t *p=(const uint8_t*)key; return _likely_(len>=4)?(_wyr4(p)+_wyr4(p+len-4))*(_wyr4(p+(len>>1)-2)^seed):(_likely_(len)?_wyr3(p,len)*(_wyp[0]^seed):seed); } static inline uint64_t _wyhash(const void* key, uint64_t len, uint64_t seed, const uint64_t secret[6]){ const uint8_t *p=(const uint8_t*)key; uint64_t i=len; seed^=secret[4]; if(_likely_(i<=64)){ label: if(_likely_(i>=8)){ if(_likely_(i<=16)) return _wymix(_wyr8(p)^secret[0],_wyr8(p+i-8)^seed); else if(_likely_(i<=32)) return _wymix(_wyr8(p)^secret[0],_wyr8(p+8)^seed)^_wymix(_wyr8(p+i-16)^secret[1],_wyr8(p+i-8)^seed); else return _wymix(_wyr8(p)^secret[0],_wyr8(p+8)^seed)^_wymix(_wyr8(p+16)^secret[1],_wyr8(p+24)^seed) ^_wymix(_wyr8(p+i-32)^secret[2],_wyr8(p+i-24)^seed)^_wymix(_wyr8(p+i-16)^secret[3],_wyr8(p+i-8)^seed); } else { if(_likely_(i>=4)) return 
_wymix(_wyr4(p)^secret[0],_wyr4(p+i-4)^seed); else return _wymix((_likely_(i)?_wyr3(p,i):0)^secret[0],seed); } } uint64_t see1=seed, see2=seed, see3=seed; for(; i>64; i-=64,p+=64){ seed=_wymix(_wyr8(p)^secret[0],_wyr8(p+8)^seed); see1=_wymix(_wyr8(p+16)^secret[1],_wyr8(p+24)^see1); see2=_wymix(_wyr8(p+32)^secret[2],_wyr8(p+40)^see2); see3=_wymix(_wyr8(p+48)^secret[3],_wyr8(p+56)^see3); } seed^=see1^see2^see3; goto label; } static inline uint64_t wyhash(const void* key, uint64_t len, uint64_t seed, const uint64_t secret[6]){ return _wymum(_wyhash(key,len,seed,secret)^len,secret[5]); } static inline void make_secret(uint64_t seed, uint64_t secret[6]){ uint8_t c[]= {15,23,27,29,30,39,43,45,46,51,53,54,57,58,60,71,75,77,78,83,85,86,89,90,92,99,101,102,105,106,108,113,114,116,120,135,139,141,142,147,149,150,153,154,156,163,165,166,169,170,172,177,178,180,184,195,197,198,201,202,204,209,210,212,216,225,226,228,232,240}; for(size_t i=0; i<6; i++){ uint8_t ok; do{ ok=1; secret[i]=0; for(size_t j=0; j<64; j+=8) secret[i]|=((uint64_t)c[wyrand(&seed)%sizeof(c)])<<j; for(size_t j=0; j<i; j++) #if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__) if(__builtin_popcountll(secret[i]^secret[j])!=32) ok=0; #elif defined(_MSC_VER) if(_mm_popcnt_u64(secret[i]^secret[j])!=32) ok=0; #endif if(!ok) continue; for(size_t j=2; j<0x100000000ull; j++) if(secret[i]%j==0){ ok=0; break; } }while(!ok); } } static inline uint64_t wyhash64(uint64_t A, uint64_t B){ return _wymum(_wymum(A^_wyp[0],B^_wyp[1]),_wyp[2]); } typedef struct wyhash_context { uint64_t secret[5]; uint64_t seed, see1, see2, see3; uint8_t buffer[64]; uint8_t left; // always in [0, 64] int loop; uint64_t total; } wyhash_context_t; static inline void wyhash_init(wyhash_context_t *const __restrict ctx, const uint64_t seed, const uint64_t secret[5]){ memcpy(ctx->secret, secret, sizeof(ctx->secret)); ctx->seed=seed^secret[4]; ctx->see1=ctx->seed; ctx->see2=ctx->seed; ctx->see3=ctx->seed; ctx->left=0; ctx->total=0; 
ctx->loop=0; } static inline uint64_t _wyhash_loop(wyhash_context_t *const __restrict ctx, const uint8_t *p, const uint64_t len){ uint64_t i = len; ctx->loop|=(i>64); for(; i>64; i-=64,p+=64){ ctx->seed=_wymix(_wyr8(p)^ctx->secret[0],_wyr8(p+8)^ctx->seed); ctx->see1=_wymix(_wyr8(p+16)^ctx->secret[1],_wyr8(p+24)^ctx->see1); ctx->see2=_wymix(_wyr8(p+32)^ctx->secret[2],_wyr8(p+40)^ctx->see2); ctx->see3=_wymix(_wyr8(p+48)^ctx->secret[3],_wyr8(p+56)^ctx->see3); } return len - i; } static inline void wyhash_update(wyhash_context_t *const __restrict ctx, const void* const key, uint64_t len){ ctx->total += len; // overflow for total length is ok const uint8_t* p = (const uint8_t*)key; uint8_t slots = 64 - ctx->left; // assert left <= 64 slots = len <= slots ? len : slots; memcpy(ctx->buffer + ctx->left, p, slots); p += slots; len -= slots; ctx->left += slots; ctx->left -= _wyhash_loop(ctx, ctx->buffer, ctx->left + (len > 0)); const uint64_t consumed = _wyhash_loop(ctx, p, len); p += consumed; len -= consumed; // assert len <= 64 ctx->left = ctx->left > len ? ctx->left : (uint8_t)len; memcpy(ctx->buffer, p, len); } static inline uint64_t wyhash_final(wyhash_context_t *const __restrict ctx){ if(_likely_(ctx->loop)) ctx->seed ^= ctx->see1 ^ ctx->see2 ^ ctx->see3; return _wymum(_wyhash(ctx->buffer, ctx->left, ctx->seed ^ ctx->secret[4], ctx->secret)^ctx->total,ctx->secret[4]); } #endif
7,843
C++
.h
153
47.124183
273
0.635631
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,831
event_manager.h
typesense_typesense/include/event_manager.h
#pragma once #include "json.hpp" #include "option.h" class EventManager { private: EventManager() = default; ~EventManager() = default; static constexpr char* EVENT_TYPE = "type"; static constexpr char* EVENT_DATA = "data"; static constexpr char* EVENT_NAME = "name"; public: static EventManager& get_instance() { static EventManager instance; return instance; } EventManager(EventManager const&) = delete; void operator=(EventManager const&) = delete; Option<bool> add_event(const nlohmann::json& event, const std::string& ip); };
595
C++
.h
19
27
79
0.697715
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,832
cvt.h
typesense_typesense/include/cvt.h
#pragma once /* Compact Variable Trie ================================================================================================================ ates, at, as, but, tok, too [ * ] ⁄ | \ a b t-o / \ \ /\ s t utØ k o / / \ / \ Ø esØ Ø Ø Ø BASIC DESIGN ============ * All nodes in the tree level in the same block. * Pointer to ONLY FIRST child node of each sibling. * Each sibling node's children represented by their character/byte * For root there are no siblings, so pointer only to `a` child. * Each node can be a single-char prefix, multi-char prefix or leaf. ROOT -> [0|PTRA][3][a][b][t] PTRA -> [0|PTRS] [2|PTRU] [4|PTRK] [2][s][t] [2][u][t] [2][k][o] PTRS -> [0|PTRØ][1|PTRE][1][Ø][2][e][Ø] PTRT -> [0|L_PTRE][3|PTRØ][3][e][s][Ø][1][Ø] (path compression) PTRØ -> [0|LEAF] [OFFSET][PTR][TYPE]..[NUM_CHILDREN][A][B]..[X] [ 16 ][45][ 3 ] (64 bits) 2 bytes for type+offset 6 bytes for address 1 byte for num_children x bytes for bytes Actual offset to the Nth node's children: (8*NUM_CHILDREN) + N + offset if num_children >= 32: Use bitset to represent children present Read 32 bytes and do bitset operations to extract matched index else: Use array to represent children Read `num_children` bytes and do sequential search Multi-char node (COMPRESSED node) will be packed as: [num_prefix][prefixes][num_children][children] Removal of [be] 1. Realloc contents of PTR1 by removing "e" from the nodes list 2. Free PTR3 3. 
Realloc contents of ROOT by removing "b" from the nodes list */ #include <cstdint> #include <cstddef> #include "logger.h" struct cvt_leaf_t { size_t value; }; enum CVT_NODE { INTERNAL = 0, LEAF = 1, COMPRESSED = 2, }; class CVTrie { private: size_t size; uint8_t* root; const uintptr_t PTR_MASK = ~(1ULL << 48ULL); public: CVTrie(): root(nullptr) { } inline void* get_ptr(const void* tagged_ptr) { // Right shift of signed integer for sign extension is implementation-defined but works on major compilers return (void*)( ((intptr_t)((uintptr_t)tagged_ptr << 16ULL) >> 16ULL) & ~3 ); } inline void* tag_ptr(const void* ptr, const uint16_t offset, const CVT_NODE node_type) { return (void*)(((uintptr_t)ptr & PTR_MASK) | (uint64_t(offset) << 48ULL) | uint64_t(node_type)); } inline uint8_t get_node_type(const void* tagged_ptr) { return (uintptr_t)(tagged_ptr) & 3; } inline uint16_t get_offset(const void* ptr) { return (uintptr_t)(ptr) >> 48ULL; } void* find(const char* key, const uint8_t length); bool add(const char* key, const uint8_t length, void* value); };
2,829
C++
.h
78
31.717949
114
0.603493
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,834
adi_tree.h
typesense_typesense/include/adi_tree.h
#pragma once #include <string> #include "sparsepp.h" struct adi_node_t; class adi_tree_t { private: spp::sparse_hash_map<uint32_t, std::string> id_keys; adi_node_t* root = nullptr; static void add_node(adi_node_t* node, const std::string& key, size_t key_index); static bool rank_aggregate(adi_node_t* node, const std::string& key, size_t key_index, size_t& rank); static adi_node_t* get_node(adi_node_t* node, const std::string& key, const size_t key_index, std::vector<adi_node_t*>& path); void remove_node(adi_node_t* node, const std::string& key, const size_t key_index); public: static constexpr size_t NOT_FOUND = INT64_MAX; adi_tree_t(); ~adi_tree_t(); void index(uint32_t id, const std::string& key); size_t rank(uint32_t id); void remove(uint32_t id); const adi_node_t* get_root(); };
893
C++
.h
22
35.090909
105
0.65771
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,835
synonym_index.h
typesense_typesense/include/synonym_index.h
#pragma once #include <set> #include "sparsepp.h" #include "json.hpp" #include "string_utils.h" #include "option.h" #include "tokenizer.h" #include "store.h" #include "art.h" struct synonym_t { std::string id; std::string raw_root; // used in code and differs from API + storage format std::vector<std::string> root; std::vector<std::string> raw_synonyms; // used in code and differs from API + storage format std::vector<std::vector<std::string>> synonyms; std::string locale; std::vector<char> symbols; synonym_t() = default; nlohmann::json to_view_json() const; static Option<bool> parse(const nlohmann::json& synonym_json, synonym_t& syn); static uint64_t get_hash(const std::vector<std::string>& tokens) { uint64_t hash = 1; for(size_t i=0; i < tokens.size(); i++) { auto& token = tokens[i]; uint64_t token_hash = StringUtils::hash_wy(token.c_str(), token.size()); if(i == 0) { hash = token_hash; } else { hash = StringUtils::hash_combine(hash, token_hash); } } return hash; } }; class SynonymIndex { private: mutable std::shared_mutex mutex; Store* store; spp::sparse_hash_map<std::string, uint32_t> synonym_ids_index_map; art_tree* synonym_index_tree; uint32_t synonym_index = 0; std::map<uint32_t, synonym_t> synonym_definitions; void synonym_reduction_internal(const std::vector<std::string>& tokens, const std::string& locale, size_t start_window_size, size_t start_index_pos, std::set<std::string>& processed_tokens, std::vector<std::vector<std::string>>& results, const std::vector<std::string>& orig_tokens, bool synonym_prefix, uint32_t synonym_num_typos) const; public: static constexpr const char* COLLECTION_SYNONYM_PREFIX = "$CY"; SynonymIndex(Store* store): store(store) { synonym_index_tree = new art_tree; art_tree_init(synonym_index_tree); } ~SynonymIndex() { art_tree_destroy(synonym_index_tree); delete synonym_index_tree; } static std::string get_synonym_key(const std::string & collection_name, const std::string & synonym_id); void synonym_reduction(const 
std::vector<std::string>& tokens, const std::string& locale, std::vector<std::vector<std::string>>& results, bool synonym_prefix, uint32_t synonym_num_typos) const; Option<std::map<uint32_t, synonym_t*>> get_synonyms(uint32_t limit=0, uint32_t offset=0); bool get_synonym(const std::string& id, synonym_t& synonym); Option<bool> add_synonym(const std::string & collection_name, const synonym_t& synonym, bool write_to_store = true); Option<bool> remove_synonym(const std::string & collection_name, const std::string & id); };
3,176
C++
.h
73
33.369863
108
0.594095
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,836
field.h
typesense_typesense/include/field.h
#pragma once #include <string> #include <s2/s2latlng.h> #include "option.h" #include "string_utils.h" #include "logger.h" #include "store.h" #include <sparsepp.h> #include <tsl/htrie_map.h> #include <filter.h> #include "json.hpp" #include "embedder_manager.h" #include "vector_query_ops.h" #include <mutex> #include "stemmer_manager.h" namespace field_types { // first field value indexed will determine the type static const std::string AUTO = "auto"; static const std::string OBJECT = "object"; static const std::string OBJECT_ARRAY = "object[]"; static const std::string STRING = "string"; static const std::string INT32 = "int32"; static const std::string INT64 = "int64"; static const std::string FLOAT = "float"; static const std::string BOOL = "bool"; static const std::string NIL = "nil"; static const std::string GEOPOINT = "geopoint"; static const std::string STRING_ARRAY = "string[]"; static const std::string INT32_ARRAY = "int32[]"; static const std::string INT64_ARRAY = "int64[]"; static const std::string FLOAT_ARRAY = "float[]"; static const std::string BOOL_ARRAY = "bool[]"; static const std::string GEOPOINT_ARRAY = "geopoint[]"; static const std::string IMAGE = "image"; static bool is_string_or_array(const std::string& type_def) { return type_def == "string*"; } static bool is_array(const std::string& type_def) { return type_def.size() > 2 && type_def[type_def.size() - 2] == '[' && type_def[type_def.size() - 1] == ']'; } } namespace fields { static const std::string name = "name"; static const std::string type = "type"; static const std::string facet = "facet"; static const std::string optional = "optional"; static const std::string index = "index"; static const std::string sort = "sort"; static const std::string infix = "infix"; static const std::string locale = "locale"; static const std::string nested = "nested"; static const std::string nested_array = "nested_array"; static const std::string num_dim = "num_dim"; static const std::string vec_dist = 
"vec_dist"; static const std::string reference = "reference"; static const std::string async_reference = "async_reference"; static const std::string embed = "embed"; static const std::string from = "from"; static const std::string model_name = "model_name"; static const std::string range_index = "range_index"; static const std::string stem = "stem"; // Some models require additional parameters to be passed to the model during indexing/querying // For e.g. e5-small model requires prefix "passage:" for indexing and "query:" for querying static const std::string indexing_prefix = "indexing_prefix"; static const std::string query_prefix = "query_prefix"; static const std::string api_key = "api_key"; static const std::string model_config = "model_config"; static const std::string reference_helper_fields = ".ref"; static const std::string REFERENCE_HELPER_FIELD_SUFFIX = "_sequence_id"; static const std::string store = "store"; static const std::string hnsw_params = "hnsw_params"; } enum vector_distance_type_t { ip, cosine }; struct reference_pair_t { std::string collection; std::string field; reference_pair_t(std::string collection, std::string field) : collection(std::move(collection)), field(std::move(field)) {} bool operator < (const reference_pair_t& other) const noexcept { if (collection == other.collection) { return field < other.field; } return collection < other.collection; } }; struct field { std::string name; std::string type; bool facet; bool optional; bool index; std::string locale; bool sort; bool infix; bool nested; // field inside an object bool store = true; // store the field in disk // field inside an array of objects that is forced to be an array // integer to handle tri-state: true (1), false (0), not known yet (2) // third state is used to diff between array of object and array within object during write int nested_array; size_t num_dim; nlohmann::json embed; vector_distance_type_t vec_dist; static constexpr int VAL_UNKNOWN = 2; std::string 
reference; // Foo.bar (reference to bar field in Foo collection). bool is_async_reference = false; bool range_index; bool is_reference_helper = false; bool stem = false; std::shared_ptr<Stemmer> stemmer; nlohmann::json hnsw_params; field() {} field(const std::string &name, const std::string &type, const bool facet, const bool optional = false, bool index = true, std::string locale = "", int sort = -1, int infix = -1, bool nested = false, int nested_array = 0, size_t num_dim = 0, vector_distance_type_t vec_dist = cosine, std::string reference = "", const nlohmann::json& embed = nlohmann::json(), const bool range_index = false, const bool store = true, const bool stem = false, const nlohmann::json hnsw_params = nlohmann::json(), const bool async_reference = false) : name(name), type(type), facet(facet), optional(optional), index(index), locale(locale), nested(nested), nested_array(nested_array), num_dim(num_dim), vec_dist(vec_dist), reference(reference), embed(embed), range_index(range_index), store(store), stem(stem), hnsw_params(hnsw_params), is_async_reference(async_reference) { set_computed_defaults(sort, infix); auto const suffix = std::string(fields::REFERENCE_HELPER_FIELD_SUFFIX); is_reference_helper = name.size() > suffix.size() && name.substr(name.size() - suffix.size()) == suffix; if (stem) { stemmer = StemmerManager::get_instance().get_stemmer(locale); } } void set_computed_defaults(int sort, int infix) { if(sort != -1) { this->sort = bool(sort); } else { this->sort = is_num_sort_field(); } this->infix = (infix != -1) ? 
bool(infix) : false; } bool operator<(const field& f) const { return name < f.name; } bool operator==(const field& f) const { return name == f.name; } bool is_auto() const { return (type == field_types::AUTO); } bool is_single_integer() const { return (type == field_types::INT32 || type == field_types::INT64); } bool is_single_float() const { return (type == field_types::FLOAT); } bool is_single_bool() const { return (type == field_types::BOOL); } bool is_single_geopoint() const { return (type == field_types::GEOPOINT); } bool is_image() const { return (type == field_types::IMAGE); } bool is_integer() const { return (type == field_types::INT32 || type == field_types::INT32_ARRAY || type == field_types::INT64 || type == field_types::INT64_ARRAY); } bool is_int32() const { return (type == field_types::INT32 || type == field_types::INT32_ARRAY); } bool is_int64() const { return (type == field_types::INT64 || type == field_types::INT64_ARRAY); } bool is_float() const { return (type == field_types::FLOAT || type == field_types::FLOAT_ARRAY); } bool is_bool() const { return (type == field_types::BOOL || type == field_types::BOOL_ARRAY); } bool is_geopoint() const { return (type == field_types::GEOPOINT || type == field_types::GEOPOINT_ARRAY); } bool is_object() const { return (type == field_types::OBJECT || type == field_types::OBJECT_ARRAY); } bool is_string() const { return (type == field_types::STRING || type == field_types::STRING_ARRAY); } bool is_string_star() const { return field_types::is_string_or_array(type); } bool is_facet() const { return facet; } bool is_array() const { return (type == field_types::STRING_ARRAY || type == field_types::INT32_ARRAY || type == field_types::FLOAT_ARRAY || type == field_types::INT64_ARRAY || type == field_types::BOOL_ARRAY || type == field_types::GEOPOINT_ARRAY || type == field_types::OBJECT_ARRAY); } bool is_singular() const { return !is_array(); } static bool is_dynamic(const std::string& name, const std::string& type) { 
return type == "string*" || (name != ".*" && type == field_types::AUTO) || (name != ".*" && name.find(".*") != std::string::npos); } bool is_dynamic() const { return is_dynamic(name, type); } bool has_numerical_index() const { return (type == field_types::INT32 || type == field_types::INT64 || type == field_types::FLOAT || type == field_types::BOOL); } bool is_num_sort_field() const { return (has_numerical_index() || is_geopoint()); } bool is_sort_field() const { return is_num_sort_field() || (type == field_types::STRING); } bool is_num_sortable() const { return sort && is_num_sort_field(); } bool is_str_sortable() const { return sort && type == field_types::STRING; } bool is_sortable() const { return is_num_sortable() || is_str_sortable(); } bool is_stem() const { return stem; } bool has_valid_type() const { bool is_basic_type = is_string() || is_integer() || is_float() || is_bool() || is_geopoint() || is_object() || is_auto() || is_image(); if(!is_basic_type) { return field_types::is_string_or_array(type); } return true; } std::string faceted_name() const { return (facet && !is_string()) ? 
"_fstr_" + name : name; } std::shared_ptr<Stemmer> get_stemmer() const { return stemmer; } static bool get_type(const nlohmann::json& obj, std::string& field_type) { if(obj.is_array()) { if(obj.empty()) { return false; } bool parseable = get_single_type(obj[0], field_type); if(!parseable) { return false; } field_type = field_type + "[]"; return true; } return get_single_type(obj, field_type); } static bool get_single_type(const nlohmann::json& obj, std::string& field_type) { if(obj.is_string()) { field_type = field_types::STRING; return true; } if(obj.is_number_float()) { field_type = field_types::FLOAT; return true; } if(obj.is_number_integer()) { field_type = field_types::INT64; return true; } if(obj.is_boolean()) { field_type = field_types::BOOL; return true; } if(obj.is_object()) { field_type = field_types::OBJECT; return true; } return false; } static Option<bool> fields_to_json_fields(const std::vector<field> & fields, const std::string & default_sorting_field, nlohmann::json& fields_json); static Option<bool> json_field_to_field(bool enable_nested_fields, nlohmann::json& field_json, std::vector<field>& the_fields, string& fallback_field_type, size_t& num_auto_detect_fields); static Option<bool> json_fields_to_fields(bool enable_nested_fields, nlohmann::json& fields_json, std::string& fallback_field_type, std::vector<field>& the_fields); static Option<bool> validate_and_init_embed_field(const tsl::htrie_map<char, field>& search_schema, nlohmann::json& field_json, const nlohmann::json& fields_json, field& the_field); static bool flatten_obj(nlohmann::json& doc, nlohmann::json& value, bool has_array, bool has_obj_array, bool is_update, const field& the_field, const std::string& flat_name, const std::unordered_map<std::string, field>& dyn_fields, std::unordered_map<std::string, field>& flattened_fields); static Option<bool> flatten_field(nlohmann::json& doc, nlohmann::json& obj, const field& the_field, std::vector<std::string>& path_parts, size_t path_index, 
bool has_array, bool has_obj_array, bool is_update, const std::unordered_map<std::string, field>& dyn_fields, std::unordered_map<std::string, field>& flattened_fields); static Option<bool> flatten_doc(nlohmann::json& document, const tsl::htrie_map<char, field>& nested_fields, const std::unordered_map<std::string, field>& dyn_fields, bool is_update, std::vector<field>& flattened_fields); static void compact_nested_fields(tsl::htrie_map<char, field>& nested_fields); }; enum index_operation_t { CREATE, UPSERT, UPDATE, EMPLACE, DELETE }; enum class DIRTY_VALUES { REJECT = 1, DROP = 2, COERCE_OR_REJECT = 3, COERCE_OR_DROP = 4, }; namespace sort_field_const { static const std::string name = "name"; static const std::string order = "order"; static const std::string asc = "ASC"; static const std::string desc = "DESC"; static const std::string text_match = "_text_match"; static const std::string eval = "_eval"; static const std::string seq_id = "_seq_id"; static const std::string group_found = "_group_found"; static const std::string exclude_radius = "exclude_radius"; static const std::string precision = "precision"; static const std::string missing_values = "missing_values"; static const std::string vector_distance = "_vector_distance"; static const std::string vector_query = "_vector_query"; static const std::string random_order = "_rand"; static const std::string origin = "origin"; static const std::string gauss = "gauss"; static const std::string exp = "exp"; static const std::string linear = "linear"; static const std::string scale = "scale"; static const std::string offset = "offset"; static const std::string decay = "decay"; static const std::string func = "func"; static const std::string diff = "diff"; } namespace ref_include { static const std::string strategy_key = "strategy"; static const std::string merge_string = "merge"; static const std::string nest_string = "nest"; static const std::string nest_array_string = "nest_array"; enum strategy_enum {merge = 0, 
nest, nest_array}; static Option<strategy_enum> string_to_enum(const std::string& strategy) { if (strategy == merge_string) { return Option<strategy_enum>(merge); } else if (strategy == nest_string) { return Option<strategy_enum>(nest); } else if (strategy == nest_array_string) { return Option<strategy_enum>(nest_array); } return Option<strategy_enum>(400, "Unknown include strategy `" + strategy + "`. " "Valid options are `merge`, `nest`, `nest_array`."); } } struct ref_include_exclude_fields { std::string collection_name; std::string include_fields; std::string exclude_fields; std::string alias; ref_include::strategy_enum strategy = ref_include::nest; // In case we have nested join. std::vector<ref_include_exclude_fields> nested_join_includes = {}; }; struct hnsw_index_t; struct sort_vector_query_t { vector_query_t query; hnsw_index_t* vector_index; }; struct sort_random_t { bool is_enabled = false; mutable std::mt19937 rng; mutable std::uniform_int_distribution<uint32_t> distrib; sort_random_t() : distrib(0, UINT32_MAX) {}; sort_random_t& operator=(const sort_random_t& other) { rng = other.rng; distrib = other.distrib; is_enabled = other.is_enabled; } void initialize(uint32_t seed) { rng.seed(seed); is_enabled = true; } uint32_t generate_random() const { return distrib(rng); } }; struct sort_by { enum missing_values_t { first, last, normal, }; enum sort_by_params_t { none, diff, gauss, exp, linear, }; struct eval_t { filter_node_t** filter_trees = nullptr; // Array of filter_node_t pointers. 
std::vector<uint32_t*> eval_ids_vec; std::vector<uint32_t> eval_ids_count_vec; std::vector<int64_t> scores; }; std::string name; std::vector<std::string> eval_expressions; std::string order; // for text_match score bucketing uint32_t text_match_buckets; // geo related fields int64_t geopoint; uint32_t exclude_radius; uint32_t geo_precision; missing_values_t missing_values; eval_t eval; std::string reference_collection_name; std::vector<std::string> nested_join_collection_names; sort_vector_query_t vector_query; sort_random_t random_sort; int64_t origin_val = INT64_MAX; int64_t scale = INT64_MAX; int64_t offset = 0; float decay_val = 0.5f; sort_by_params_t sort_by_param = none; sort_by(const std::string & name, const std::string & order): name(name), order(order), text_match_buckets(0), geopoint(0), exclude_radius(0), geo_precision(0), missing_values(normal) { } sort_by(std::vector<std::string> eval_expressions, std::vector<int64_t> scores, std::string order): eval_expressions(std::move(eval_expressions)), order(std::move(order)), text_match_buckets(0), geopoint(0), exclude_radius(0), geo_precision(0), missing_values(normal) { name = sort_field_const::eval; eval.scores = std::move(scores); } sort_by(const std::string &name, const std::string &order, uint32_t text_match_buckets, int64_t geopoint, uint32_t exclude_radius, uint32_t geo_precision) : name(name), order(order), text_match_buckets(text_match_buckets), geopoint(geopoint), exclude_radius(exclude_radius), geo_precision(geo_precision), missing_values(normal) { } sort_by(const sort_by& other) { if (&other == this) return; name = other.name; eval_expressions = other.eval_expressions; order = other.order; text_match_buckets = other.text_match_buckets; geopoint = other.geopoint; exclude_radius = other.exclude_radius; geo_precision = other.geo_precision; missing_values = other.missing_values; eval = other.eval; reference_collection_name = other.reference_collection_name; nested_join_collection_names = 
other.nested_join_collection_names; vector_query = other.vector_query; random_sort = other.random_sort; sort_by_param = other.sort_by_param; origin_val = other.origin_val; scale = other.scale; offset = other.offset; decay_val = other.decay_val; } sort_by& operator=(const sort_by& other) { if (&other == this) { return *this; } name = other.name; eval_expressions = other.eval_expressions; order = other.order; text_match_buckets = other.text_match_buckets; geopoint = other.geopoint; exclude_radius = other.exclude_radius; geo_precision = other.geo_precision; missing_values = other.missing_values; eval = other.eval; reference_collection_name = other.reference_collection_name; nested_join_collection_names = other.nested_join_collection_names; return *this; } [[nodiscard]] inline bool is_nested_join_sort_by() const { return nested_join_collection_names.size() > 1; } }; class GeoPoint { constexpr static const double EARTH_RADIUS = 3958.75; constexpr static const double METER_CONVERT = 1609.00; constexpr static const uint64_t MASK_H32_BITS = 0xffffffffUL; public: static uint64_t pack_lat_lng(double lat, double lng) { // https://stackoverflow.com/a/1220393/131050 const int32_t ilat = lat * 1000000; const int32_t ilng = lng * 1000000; // during int32_t -> uint64_t, higher order bits will be 1, so we have to mask that const uint64_t lat_lng = (uint64_t(ilat) << 32) | (uint64_t)(ilng & MASK_H32_BITS); return lat_lng; } static void unpack_lat_lng(uint64_t packed_lat_lng, S2LatLng& latlng) { const double lat = double(int32_t((packed_lat_lng >> 32) & MASK_H32_BITS)) / 1000000; const double lng = double(int32_t(packed_lat_lng & MASK_H32_BITS)) / 1000000; latlng = S2LatLng::FromDegrees(lat, lng); } // distance in meters static int64_t distance(const S2LatLng& a, const S2LatLng& b) { double rdist = a.GetDistance(b).radians(); double dist = EARTH_RADIUS * rdist; return dist * METER_CONVERT; } }; struct facet_count_t { uint32_t count = 0; // for value based faceting, actual value is 
stored here std::string fvalue; // for hash based faceting, hash value is stored here int64_t fhash; // used to fetch the actual document and value for representation uint32_t doc_id = 0; uint32_t array_pos = 0; //for sorting based on other field int64_t sort_field_val; }; struct facet_stats_t { double fvmin = std::numeric_limits<double>::max(), fvmax = -std::numeric_limits<double>::max(), fvcount = 0, fvsum = 0; }; struct range_specs_t { std::string range_label; int64_t lower_range; bool is_in_range(int64_t key) { return key >= lower_range; } }; struct facet { const std::string field_name; spp::sparse_hash_map<uint64_t, facet_count_t> result_map; spp::sparse_hash_map<std::string, facet_count_t> value_result_map; // used for facet value query spp::sparse_hash_map<std::string, std::vector<std::string>> fvalue_tokens; spp::sparse_hash_map<uint64_t, std::vector<std::string>> hash_tokens; // used for faceting grouped results spp::sparse_hash_map<uint32_t, spp::sparse_hash_set<uint32_t>> hash_groups; facet_stats_t stats; //dictionary of key=>pair(range_id, range_val) std::map<int64_t, range_specs_t> facet_range_map; bool is_range_query; bool sampled = false; bool is_wildcard_match = false; bool is_intersected = false; bool is_sort_by_alpha = false; std::string sort_order=""; std::string sort_field=""; uint32_t orig_index; bool is_top_k = false; bool get_range(int64_t key, std::pair<int64_t, std::string>& range_pair) { if(facet_range_map.empty()) { LOG (ERROR) << "Facet range is not defined!!!"; } auto it = facet_range_map.lower_bound(key); if(it != facet_range_map.end() && it->first == key) { it++; } if(it != facet_range_map.end() && it->second.is_in_range(key)) { range_pair.first = it->first; range_pair.second = it->second.range_label; return true; } return false; } explicit facet(const std::string& field_name, uint32_t orig_index, bool is_top_k = false, std::map<int64_t, range_specs_t> facet_range = {}, bool is_range_q = false, bool sort_by_alpha=false, const 
std::string& order="", const std::string& sort_by_field="") : field_name(field_name), facet_range_map(facet_range), is_range_query(is_range_q), is_sort_by_alpha(sort_by_alpha), sort_order(order), sort_field(sort_by_field), orig_index(orig_index), is_top_k(is_top_k) { } }; struct facet_info_t { // facet hash => resolved tokens std::unordered_map<uint64_t, std::vector<std::string>> hashes; std::vector<std::vector<std::string>> fvalue_searched_tokens; bool use_facet_query = false; bool should_compute_stats = false; bool use_value_index = false; field facet_field{"", "", false}; }; struct facet_query_t { std::string field_name; std::string query; }; struct facet_value_t { std::string value; std::string highlighted; uint32_t count; int64_t sort_field_val; nlohmann::json parent; }; struct facet_hash_values_t { uint32_t length = 0; std::vector<uint32_t> hashes; facet_hash_values_t() { length = 0; } facet_hash_values_t(facet_hash_values_t&& hash_values) noexcept { length = hash_values.length; hashes = hash_values.hashes; hash_values.length = 0; hash_values.hashes.clear(); } facet_hash_values_t& operator=(facet_hash_values_t&& other) noexcept { if (this != &other) { hashes.clear(); hashes = other.hashes; length = other.length; other.hashes.clear(); other.length = 0; } return *this; } ~facet_hash_values_t() { hashes.clear(); } uint64_t size() const { return length; } uint64_t back() const { return hashes.back(); } };
26,164
C++
.h
644
32.498447
144
0.606953
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,837
number.h
typesense_typesense/include/number.h
#pragma once #include <sparsepp.h> /* struct number_t { bool is_float; int64_t intval; number_t(): is_float(false), intval(0) { } explicit number_t(float val): is_float(true), intval(*reinterpret_cast<int64_t*>(&val)) { } explicit number_t(int64_t val): is_float(false), intval(val) { } inline number_t& operator = (const float val) { intval = *reinterpret_cast<const int64_t*>(&val); is_float = true; return *this; } inline number_t& operator = (const int64_t & val) { intval = val; is_float = false; return *this; } inline bool operator == (const number_t & rhs) const { if(is_float) { return (*reinterpret_cast<const float*>(&intval)) == (*reinterpret_cast<const float*>(&rhs.intval)); } return intval == rhs.intval; } inline bool operator < (const number_t & rhs) const { if(is_float) { return (*reinterpret_cast<const float*>(&intval)) < (*reinterpret_cast<const float*>(&rhs.intval)); } return intval < rhs.intval; } inline bool operator > (const number_t & rhs) const { if(is_float) { return (*reinterpret_cast<const float*>(&intval)) > (*reinterpret_cast<const float*>(&rhs.intval)); } return intval > rhs.intval; } inline number_t operator * (const number_t & rhs) const { if(is_float) { return number_t((*reinterpret_cast<const float*>(&intval)) * (*reinterpret_cast<const float*>(&rhs.intval))); } return number_t(intval * rhs.intval); } inline number_t operator-() { if(is_float) { float floatval = *reinterpret_cast<float*>(&intval); floatval = -floatval; intval = *reinterpret_cast<int64_t *>(&floatval); } else { intval = -intval; } return *this; } };*/
1,948
C++
.h
57
26.578947
121
0.571733
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,838
http_server.h
typesense_typesense/include/http_server.h
#pragma once #define H2O_USE_LIBUV 0 extern "C" { #include "h2o.h" #include "h2o/http1.h" #include "h2o/http2.h" #include "h2o/multithread.h" } #include <map> #include <string> #include <cstdio> #include "http_data.h" #include "option.h" #include "threadpool.h" class ReplicationState; class HttpServer; struct h2o_custom_req_handler_t { h2o_handler_t super; HttpServer* http_server; }; struct h2o_custom_generator_t { h2o_generator_t h2o_generator; h2o_custom_req_handler_t* h2o_handler; route_path* rpath; std::shared_ptr<http_req> request; std::shared_ptr<http_res> response; std::shared_ptr<http_req>& req() { return request; } std::shared_ptr<http_res>& res() { return response; } }; struct deferred_req_res_t { const std::shared_ptr<http_req> req; const std::shared_ptr<http_res> res; HttpServer* server; // used to manage lifecycle of async actions bool destroy_after_use; deferred_req_res_t(const std::shared_ptr<http_req> &req, const std::shared_ptr<http_res> &res, HttpServer *server, bool destroy_after_use) : req(req), res(res), server(server), destroy_after_use(destroy_after_use) {} }; struct async_req_res_t { // NOTE: care must be taken to ensure that concurrent writes are protected as some fields are also used by http lib private: // not exposed or accessed, here only for reference counting const std::shared_ptr<http_req> req; const std::shared_ptr<http_res> res; public: // used to manage lifecycle of async actions const bool destroy_after_use; async_req_res_t(const std::shared_ptr<http_req>& h_req, const std::shared_ptr<http_res>& h_res, const bool destroy_after_use) : req(h_req), res(h_res), destroy_after_use(destroy_after_use) { std::shared_lock lk(res->mres); if(!res->is_alive || req->_req == nullptr || res->generator == nullptr) { return; } h2o_custom_generator_t* res_generator = static_cast<h2o_custom_generator_t*>(res->generator.load()); auto& res_state = req->res_state; res_state.set_req(h_req->is_diposed ? 
nullptr : h_req->_req); res_state.is_req_early_exit = (res_generator->rpath->async_req && res->final && !req->last_chunk_aggregate); res_state.send_state = res->final ? H2O_SEND_STATE_FINAL : H2O_SEND_STATE_IN_PROGRESS; res_state.generator = (res_generator == nullptr) ? nullptr : &res_generator->h2o_generator; res_state.set_response(res->status_code, res->content_type_header, res->body); } bool is_alive() { return res->is_alive; } void req_notify() { return req->notify(); } void res_notify() { return res->notify(); } stream_response_state_t& get_res_state() { return req->res_state; } }; struct defer_processing_t { const std::shared_ptr<http_req> req; const std::shared_ptr<http_res> res; size_t timeout_ms; HttpServer* server; defer_processing_t(const std::shared_ptr<http_req> &req, const std::shared_ptr<http_res> &res, size_t timeout_ms, HttpServer* server) : req(req), res(res), timeout_ms(timeout_ms), server(server) {} }; class HttpServer { private: h2o_globalconf_t config; h2o_compress_args_t compress_args; h2o_context_t ctx; h2o_accept_ctx_t* accept_ctx; h2o_hostconf_t *hostconf; h2o_socket_t* listener_socket; static const size_t ACTIVE_STREAM_WINDOW_SIZE = 196605; static const size_t REQ_TIMEOUT_MS = 60000; const uint64_t SSL_REFRESH_INTERVAL_MS; h2o_custom_timer_t ssl_refresh_timer; h2o_custom_timer_t metrics_refresh_timer; http_message_dispatcher* message_dispatcher; ReplicationState* replication_state; std::atomic<bool> exit_loop; std::string version; // must be a vector since order of routes entered matter std::vector<std::pair<uint64_t, route_path>> route_hash_to_path; // also have a hashmap for quick lookup of individual routes std::unordered_map<uint64_t, route_path> route_hash_to_path_map; const std::string listen_address; const uint32_t listen_port; std::string ssl_cert_path; std::string ssl_cert_key_path; bool cors_enabled; std::set<std::string> cors_domains; ThreadPool* thread_pool; ThreadPool* meta_thread_pool; bool 
(*auth_handler)(std::map<std::string, std::string>& params, std::vector<nlohmann::json>& embedded_params_vec, const std::string& body, const route_path& rpath, const std::string& auth_key); static void on_accept(h2o_socket_t *listener, const char *err); int setup_ssl(const char *cert_file, const char *key_file); static bool initialize_ssl_ctx(const char *cert_file, const char *key_file, h2o_accept_ctx_t* accept_ctx); static void on_ssl_refresh_timeout(h2o_timer_t *entry); static void on_ssl_ctx_delete_timeout(h2o_timer_t *entry); static void on_metrics_refresh_timeout(h2o_timer_t *entry); int create_listener(); h2o_pathconf_t *register_handler(h2o_hostconf_t *hostconf, const char *path, int (*on_req)(h2o_handler_t *, h2o_req_t *)); static int catch_all_handler(h2o_handler_t *_h2o_handler, h2o_req_t *req); static void response_proceed(h2o_generator_t *generator, h2o_req_t *req); static void response_abort(h2o_generator_t *generator, h2o_req_t *req); static void on_res_generator_dispose(void *self); static int send_response(h2o_req_t *req, int status_code, const std::string & message); static int async_req_cb(void *ctx, int is_end_stream); static bool is_write_request(const std::string& root_resource, const std::string& http_method, bool (*handler)(const std::shared_ptr<http_req>&, const std::shared_ptr<http_res>&)); static void handle_gzip(const std::shared_ptr<http_req>& request); public: HttpServer(const std::string & version, const std::string & listen_address, uint32_t listen_port, const std::string & ssl_cert_path, const std::string & ssl_cert_key_path, const uint64_t ssl_refresh_interval_ms, bool cors_enabled, const std::set<std::string>& cors_domains, ThreadPool* thread_pool); ~HttpServer(); http_message_dispatcher* get_message_dispatcher() const; ReplicationState* get_replication_state() const; bool is_alive() const; bool is_leader() const; uint64_t node_state() const; nlohmann::json node_status(); void set_auth_handler(bool (*handler)(std::map<std::string, 
std::string>& params, std::vector<nlohmann::json>& embedded_params_vec, const std::string& body, const route_path & rpath, const std::string & auth_key)); void get(const std::string & path, bool (*handler)(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res), bool async_req=false, bool async_res=false); void post(const std::string & path, bool (*handler)(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res), bool async_req=false, bool async_res=false); void put(const std::string & path, bool (*handler)(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res), bool async_req=false, bool async_res=false); void patch(const std::string & path, bool (*handler)(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res), bool async_req=false, bool async_res=false); void del(const std::string & path, bool (*handler)(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res), bool async_req=false, bool async_res=false); void on(const std::string & message, bool (*handler)(void*)); void send_message(const std::string & type, void* data); static void stream_response(stream_response_state_t& state); uint64_t find_route(const std::vector<std::string> & path_parts, const std::string & http_method, route_path** found_rpath); bool get_route(uint64_t hash, route_path** found_rpath); int run(ReplicationState* replication_state); void stop(); bool has_exited() const; void clear_timeouts(const std::vector<h2o_timer_t*> & timers, bool trigger_callback = true); static bool on_stop_server(void *data); static bool on_stream_response_message(void *data); static bool on_request_proceed_message(void *data); static bool on_deferred_processing_message(void *data); std::string get_version(); ThreadPool* get_thread_pool() const; ThreadPool* get_meta_thread_pool() const; static constexpr const char* STOP_SERVER_MESSAGE = "STOP_SERVER"; static constexpr const char* STREAM_RESPONSE_MESSAGE = 
"STREAM_RESPONSE"; static constexpr const char* REQUEST_PROCEED_MESSAGE = "REQUEST_PROCEED"; static constexpr const char* DEFER_PROCESSING_MESSAGE = "DEFER_PROCESSING"; static int process_request(const std::shared_ptr<http_req>& request, const std::shared_ptr<http_res>& response, route_path *rpath, const h2o_custom_req_handler_t *req_handler, bool use_meta_thread_pool); static void on_deferred_process_request(h2o_timer_t *entry); void defer_processing(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res, size_t timeout_ms); void do_snapshot(const std::string& snapshot_path, const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool trigger_vote(); bool reset_peers(); void persist_applying_index(); int64_t get_num_queued_writes(); void decr_pending_writes(); };
9,978
C++
.h
196
43.47449
178
0.666322
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,839
sorted_array.h
typesense_typesense/include/sorted_array.h
#pragma once #include <stdio.h> #include <cstdlib> #include <for.h> #include <cstring> #include <vector> #include <limits> #include <iostream> #include "array_base.h" #include "logger.h" class sorted_array: public array_base { private: uint32_t inline sorted_append_size_required(uint32_t value, uint32_t new_length) { uint32_t m = std::min(min, value); uint32_t M = std::max(max, value); uint32_t bnew = required_bits(M - m); uint32_t size_bits = for_compressed_size_bits(new_length, bnew); /*if(new_length == 15) { LOG(INFO) << "value: " << value << ", m: " << m << ", M: " << M << ", bnew: " << bnew << ", size_bits: " << size_bits; }*/ return METADATA_OVERHEAD + 4 + size_bits; } uint32_t lower_bound_search_bits(const uint8_t *in, uint32_t imin, uint32_t imax, uint32_t base, uint32_t bits, uint32_t value, uint32_t *actual); uint32_t lower_bound_search(const uint32_t *in, uint32_t imin, uint32_t imax, uint32_t value, uint32_t *actual); void binary_search_indices(const uint32_t *values, int low_vindex, int high_vindex, int low_index, int high_index, uint32_t base, uint32_t bits, uint32_t *indices); void binary_search_indices(const uint32_t *values, int low_vindex, int high_vindex, int low_index, int high_index, uint32_t *indices); void binary_count_indices(const uint32_t *values, int low_vindex, int high_vindex, int low_index, int high_index, uint32_t base, uint32_t bits, size_t& num_found); void binary_count_indices(const uint32_t *values, int low_vindex, int high_vindex, const uint32_t* src, int low_index, int high_index, size_t& num_found); public: void load(const uint32_t *sorted_array, const uint32_t array_length); uint32_t at(uint32_t index); uint32_t last(); bool contains(uint32_t value); uint32_t indexOf(uint32_t value); void indexOf(const uint32_t *values, size_t values_len, uint32_t* indices); size_t numFoundOf(const uint32_t *values, const size_t values_len); // returns false if malloc fails size_t append(uint32_t value); bool insert(size_t index, uint32_t 
value); void remove_value(uint32_t value); void remove_values(uint32_t *sorted_values, uint32_t sorted_values_length); };
2,562
C++
.h
51
40.509804
101
0.611736
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,841
validator.h
typesense_typesense/include/validator.h
#pragma once #include "option.h" #include <cctype> #include "json.hpp" #include "tsl/htrie_map.h" #include "field.h" class validator_t { public: static Option<uint32_t> validate_index_in_memory(nlohmann::json &document, uint32_t seq_id, const std::string & default_sorting_field, const tsl::htrie_map<char, field> & search_schema, const tsl::htrie_map<char, field> & embedding_fields, const index_operation_t op, const bool is_update, const std::string& fallback_field_type, const DIRTY_VALUES& dirty_values, const bool validate_embedding_fields = true); static Option<uint32_t> coerce_element(const field& a_field, nlohmann::json& document, nlohmann::json& doc_ele, const std::string& fallback_field_type, const DIRTY_VALUES& dirty_values); static Option<uint32_t> coerce_string(const DIRTY_VALUES& dirty_values, const std::string& fallback_field_type, const field& a_field, nlohmann::json &document, const std::string &field_name, nlohmann::json::iterator& array_iter, bool is_array, bool& array_ele_erased); static Option<uint32_t> coerce_int32_t(const DIRTY_VALUES& dirty_values, const field& a_field, nlohmann::json &document, const std::string &field_name, nlohmann::json::iterator& array_iter, bool is_array, bool& array_ele_erased); static Option<uint32_t> coerce_int64_t(const DIRTY_VALUES& dirty_values, const field& a_field, nlohmann::json &document, const std::string &field_name, nlohmann::json::iterator& array_iter, bool is_array, bool& array_ele_erased); static Option<uint32_t> coerce_float(const DIRTY_VALUES& dirty_values, const field& a_field, nlohmann::json &document, const std::string &field_name, nlohmann::json::iterator& array_iter, bool is_array, bool& array_ele_erased); static Option<uint32_t> coerce_bool(const DIRTY_VALUES& dirty_values, const field& a_field, nlohmann::json &document, const std::string &field_name, nlohmann::json::iterator& array_iter, bool is_array, bool& array_ele_erased); static Option<uint32_t> coerce_geopoint(const DIRTY_VALUES& dirty_values, 
const field& a_field, nlohmann::json &document, const std::string &field_name, nlohmann::json& lat, nlohmann::json& lng, nlohmann::json::iterator& array_iter, bool is_array, bool& array_ele_erased); static Option<bool> validate_embed_fields(const nlohmann::json& document, const tsl::htrie_map<char, field>& embedding_fields, const tsl::htrie_map<char, field> & search_schema, const bool& is_update); };
3,794
C++
.h
48
49.083333
132
0.489288
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,842
http_data.h
typesense_typesense/include/http_data.h
#pragma once #include <cstdint> #include <string> #include <map> #include <vector> #include <future> #include <chrono> #include <iomanip> #include "json.hpp" #include "string_utils.h" #include "logger.h" #include "app_metrics.h" #include "tsconfig.h" #include "zlib.h" #define H2O_USE_LIBUV 0 extern "C" { #include "h2o.h" } using TimePoint = std::chrono::high_resolution_clock::time_point; struct h2o_custom_timer_t { h2o_timer_t timer; void *data; h2o_custom_timer_t(): data(nullptr) {} explicit h2o_custom_timer_t(void *data): data(data) { } }; enum class ROUTE_CODES { NOT_FOUND = 1, ALREADY_HANDLED = 2, }; struct http_res { uint32_t status_code; std::string content_type_header; std::string body; std::atomic<bool> final; std::shared_mutex mres; std::atomic<bool> is_alive; std::atomic<void*> generator = nullptr; // indicates whether follower is proxying this response stream from leader bool proxied_stream = false; std::mutex mcv; std::condition_variable cv; bool ready; http_res(void* generator): status_code(0), content_type_header("application/json; charset=utf-8"), final(true), is_alive(generator != nullptr), generator(generator), ready(false) { } ~http_res() { //LOG(INFO) << "~http_res " << this; } void set_content(uint32_t status_code, const std::string& content_type_header, const std::string& body, const bool final) { this->status_code = status_code; this->content_type_header = content_type_header; this->body = body; this->final = final; } void wait() { auto lk = std::unique_lock<std::mutex>(mcv); cv.wait(lk, [&] { return ready; }); ready = false; } void notify() { // Ideally we don't need lock over notify but it is needed here because // the parent object could be deleted after lock on mutex is released but // before notify can be called on condition variable. 
std::lock_guard<std::mutex> lk(mcv); ready = true; cv.notify_all(); } static const char* get_status_reason(uint32_t status_code) { switch(status_code) { case 200: return "OK"; case 201: return "Created"; case 400: return "Bad Request"; case 401: return "Unauthorized"; case 403: return "Forbidden"; case 404: return "Not Found"; case 405: return "Not Allowed"; case 409: return "Conflict"; case 422: return "Unprocessable Entity"; case 429: return "Too Many Requests"; case 500: return "Internal Server Error"; default: return ""; } } void set_200(const std::string & res_body) { status_code = 200; body = res_body; } void set_201(const std::string & res_body) { status_code = 201; body = res_body; } void set_400(const std::string & message) { status_code = 400; body = "{\"message\": \"" + message + "\"}"; } void set_401(const std::string & message) { status_code = 400; body = "{\"message\": \"" + message + "\"}"; } void set_403() { status_code = 403; body = "{\"message\": \"Forbidden\"}"; } void set_404() { status_code = 404; body = "{\"message\": \"Not Found\"}"; } void set_405(const std::string & message) { status_code = 405; body = "{\"message\": \"" + message + "\"}"; } void set_409(const std::string & message) { status_code = 409; body = "{\"message\": \"" + message + "\"}"; } void set_422(const std::string & message) { status_code = 422; body = "{\"message\": \"" + message + "\"}"; } void set_500(const std::string & message) { status_code = 500; body = "{\"message\": \"" + message + "\"}"; } void set_503(const std::string & message) { status_code = 503; body = "{\"message\": \"" + message + "\"}"; } void set(uint32_t code, const std::string & message) { status_code = code; body = "{\"message\": \"" + message + "\"}"; } void set_body(uint32_t code, const std::string & message) { status_code = code; body = message; } }; struct cached_res_t { uint32_t status_code; std::string content_type_header; std::string body; TimePoint created_at; uint32_t ttl; uint64_t hash; bool 
operator == (const cached_res_t& res) const { return hash == res.hash; } bool operator != (const cached_res_t& res) const { return hash != res.hash; } void load(uint32_t status_code, const std::string& content_type_header, const std::string& body, const TimePoint created_at, const uint32_t ttl, uint64_t hash) { this->status_code = status_code; this->content_type_header = content_type_header; this->body = body; this->created_at = created_at; this->ttl = ttl; this->hash = hash; } }; struct ip_addr_str_t { static const size_t IP_MAX_LEN = 64; char ip[IP_MAX_LEN]; }; struct req_state_t { public: virtual ~req_state_t() = default; }; struct stream_response_state_t { private: h2o_req_t* req = nullptr; public: bool is_req_early_exit = false; bool is_res_start = true; h2o_send_state_t send_state = H2O_SEND_STATE_IN_PROGRESS; std::string res_body; h2o_iovec_t res_buff; std::string res_content_type; int status = 0; const char* reason = nullptr; h2o_generator_t* generator = nullptr; void set_response(uint32_t status_code, const std::string& content_type, std::string& body) { std::string().swap(res_body); res_body = std::move(body); res_buff = h2o_iovec_t{.base = res_body.data(), .len = res_body.size()}; if(is_res_start) { res_content_type = std::move(content_type); status = (int)status_code; reason = http_res::get_status_reason(status_code); is_res_start = false; } } void set_req(h2o_req_t* _req) { req = _req; } h2o_req_t* get_req() { return req; } }; struct http_req { static constexpr const char* AUTH_HEADER = "x-typesense-api-key"; static constexpr const char* USER_HEADER = "x-typesense-user-id"; static constexpr const char* AGENT_HEADER = "user-agent"; static constexpr const char* CONTENT_TYPE_HEADER = "content-type"; static constexpr const char* OCTET_STREAM_HEADER_VALUE = "application/octet-stream"; h2o_req_t* _req; std::string http_method; std::string path_without_query; uint64_t route_hash; std::map<std::string, std::string> params; std::vector<nlohmann::json> 
embedded_params_vec; std::string api_auth_key; bool first_chunk_aggregate; std::atomic<bool> last_chunk_aggregate; size_t chunk_len; std::string body; size_t body_index; std::string metadata; req_state_t* data; // for deffered processing of async handlers h2o_custom_timer_t defer_timer; uint64_t start_ts; // timestamp from the underlying http library uint64_t conn_ts; // was the request aborted *without a result* because of wait time exceeding search cutoff threshold? bool overloaded = false; std::mutex mcv; std::condition_variable cv; bool ready; int64_t log_index; std::atomic<bool> is_diposed; std::string client_ip = "0.0.0.0"; z_stream zs; bool zstream_initialized = false; // stores http lib related datastructures to avoid race conditions between indexing and http write threads stream_response_state_t res_state; bool is_binary_body = false; http_req(): _req(nullptr), route_hash(1), first_chunk_aggregate(true), last_chunk_aggregate(false), chunk_len(0), body_index(0), data(nullptr), ready(false), log_index(0), is_diposed(false) { start_ts = std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::system_clock::now().time_since_epoch()).count(); conn_ts = start_ts; } http_req(h2o_req_t* _req, const std::string & http_method, const std::string & path_without_query, uint64_t route_hash, const std::map<std::string, std::string>& params, std::vector<nlohmann::json>& embedded_params_vec, const std::string& api_auth_key, const std::string& body, const std::string& client_ip, bool is_binary_body): _req(_req), http_method(http_method), path_without_query(path_without_query), route_hash(route_hash), params(params), embedded_params_vec(embedded_params_vec), api_auth_key(api_auth_key), first_chunk_aggregate(true), last_chunk_aggregate(false), chunk_len(0), body(body), body_index(0), data(nullptr), ready(false), log_index(0), is_diposed(false), client_ip(client_ip), is_binary_body(is_binary_body) { if(_req != nullptr) { const auto& tv = _req->processed_at.at; 
conn_ts = (tv.tv_sec * 1000 * 1000) + tv.tv_usec; } else { conn_ts = std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::system_clock::now().time_since_epoch()).count(); } start_ts = std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::system_clock::now().time_since_epoch()).count(); } ~http_req() { //LOG(INFO) << "~http_req " << this; if(_req != nullptr) { Config& config = Config::get_instance(); uint64_t now = std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::system_clock::now().time_since_epoch()).count(); uint64_t ms_since_start = (now - start_ts) / 1000; const std::string metric_identifier = http_method + " " + path_without_query; AppMetrics::get_instance().increment_duration(metric_identifier, ms_since_start); AppMetrics::get_instance().increment_write_metrics(route_hash, ms_since_start); bool log_slow_searches = config.get_log_slow_searches_time_ms() >= 0 && int(ms_since_start) >= config.get_log_slow_searches_time_ms() && (path_without_query == "/multi_search" || StringUtils::ends_with(path_without_query, "/documents/search")); bool log_slow_requests = config.get_log_slow_requests_time_ms() >= 0 && int(ms_since_start) >= config.get_log_slow_requests_time_ms(); if(overloaded) { AppMetrics::get_instance().increment_count(AppMetrics::OVERLOADED_LABEL, 1); } else if(log_slow_searches || log_slow_requests) { // log slow request if logging is enabled bool is_multi_search_query = (path_without_query == "/multi_search"); std::string query_string = "?"; if(is_multi_search_query) { StringUtils::erase_char(body, '\n'); } for(const auto& kv: params) { if(kv.first != AUTH_HEADER) { query_string += kv.first + "=" + kv.second + "&"; } } std::string full_url_path = metric_identifier + query_string; // NOTE: we log the `body` ONLY for multi-search query LOG(INFO) << "event=slow_request, time=" << ms_since_start << " ms" << ", client_ip=" << client_ip << ", endpoint=" << full_url_path << ", body=" << (is_multi_search_query ? 
body : ""); } } delete data; data = nullptr; if(zstream_initialized) { zstream_initialized = false; inflateEnd(&zs); } } void wait() { auto lk = std::unique_lock<std::mutex>(mcv); cv.wait(lk, [&] { return ready; }); ready = false; } void notify() { // Ideally we don't need lock over notify but it is needed here because // the parent object could be deleted after lock on mutex is released but // before notify can be called on condition variable. std::lock_guard<std::mutex> lk(mcv); ready = true; cv.notify_all(); } // NOTE: we don't ser/de all fields, only ones needed for write forwarding // Take care to check for existence of key to ensure backward compatibility during upgrade void load_from_json(const std::string& json_str) { nlohmann::json j = nlohmann::json::parse(json_str); route_hash = j["route_hash"]; std::string chunk_body; is_binary_body = j.count("is_binary_body") != 0 ? j["is_binary_body"].get<bool>() : false; if (is_binary_body) { chunk_body = StringUtils::base64_decode(j["body"]); } else { chunk_body = j["body"]; } if(start_ts == 0) { // Serialized request from an older version (v0.21 and below) which serializes import data differently. body = chunk_body; } else { body += chunk_body; } for (nlohmann::json::iterator it = j["params"].begin(); it != j["params"].end(); ++it) { params.emplace(it.key(), it.value()); } metadata = j.count("metadata") != 0 ? j["metadata"] : ""; first_chunk_aggregate = j.count("first_chunk_aggregate") != 0 ? j["first_chunk_aggregate"].get<bool>() : true; last_chunk_aggregate = j.count("last_chunk_aggregate") != 0 ? j["last_chunk_aggregate"].get<bool>() : false; start_ts = j.count("start_ts") != 0 ? j["start_ts"].get<uint64_t>() : 0; log_index = j.count("log_index") != 0 ? 
j["log_index"].get<int64_t>() : 0; } std::string to_json() const { nlohmann::json j; j["route_hash"] = route_hash; j["params"] = params; j["first_chunk_aggregate"] = first_chunk_aggregate; j["last_chunk_aggregate"] = last_chunk_aggregate.load(); j["body"] = body; j["metadata"] = metadata; j["start_ts"] = start_ts; j["log_index"] = log_index; j["is_binary_body"] = is_binary_body; if (is_binary_body) { j["body"] = StringUtils::base64_encode(body); } const std::string j_dump = j.dump(-1, ' ', false, nlohmann::detail::error_handler_t::ignore); return j_dump; } static ip_addr_str_t get_ip_addr(h2o_req_t* h2o_req) { ip_addr_str_t ip_addr; sockaddr sa; if(0 != h2o_req->conn->callbacks->get_peername(h2o_req->conn, &sa)) { StringUtils::get_ip_str(&sa, ip_addr.ip, ip_addr.IP_MAX_LEN); } else { strcpy(ip_addr.ip, "0.0.0.0"); } return ip_addr; } bool do_resource_check(); }; struct route_path { std::string http_method; std::vector<std::string> path_parts; bool (*handler)(const std::shared_ptr<http_req>&, const std::shared_ptr<http_res>&) = nullptr; bool async_req; bool async_res; std::string action; route_path(const std::string &httpMethod, const std::vector<std::string> &pathParts, bool (*handler)(const std::shared_ptr<http_req>&, const std::shared_ptr<http_res>&), bool async_req, bool async_res) : http_method(httpMethod), path_parts(pathParts), handler(handler), async_req(async_req), async_res(async_res) { action = _get_action(); if(async_req) { // once a request is async, response also needs to be async this->async_res = true; } } inline bool operator< (const route_path& rhs) const { return true; } uint64_t route_hash() { std::string path = StringUtils::join(path_parts, "/"); std::string method_path = http_method + path; uint64_t hash = StringUtils::hash_wy(method_path.c_str(), method_path.size()); return (hash > 100) ? 
hash : (hash + 100); // [0-99] reserved for special codes } std::string _get_action(); }; struct h2o_custom_res_message_t { h2o_multithread_message_t super; std::map<std::string, bool (*)(void*)> *message_handlers; std::string type; void* data; }; struct http_message_dispatcher { h2o_multithread_queue_t* message_queue; h2o_multithread_receiver_t* message_receiver; std::map<std::string, bool (*)(void*)> message_handlers; void init(h2o_loop_t *loop) { message_queue = h2o_multithread_create_queue(loop); message_receiver = new h2o_multithread_receiver_t(); h2o_multithread_register_receiver(message_queue, message_receiver, on_message); } ~http_message_dispatcher() { // drain existing messages on_message(message_receiver, &message_receiver->_messages); h2o_multithread_unregister_receiver(message_queue, message_receiver); h2o_multithread_destroy_queue(message_queue); delete message_receiver; } static void on_message(h2o_multithread_receiver_t *receiver, h2o_linklist_t *messages) { while (!h2o_linklist_is_empty(messages)) { h2o_multithread_message_t *message = H2O_STRUCT_FROM_MEMBER(h2o_multithread_message_t, link, messages->next); h2o_custom_res_message_t *custom_message = reinterpret_cast<h2o_custom_res_message_t*>(message); const std::map<std::string, bool (*)(void*)>::const_iterator handler_itr = custom_message->message_handlers->find(custom_message->type); if(handler_itr != custom_message->message_handlers->end()) { auto handler = handler_itr->second; (handler)(custom_message->data); } h2o_linklist_unlink(&message->link); delete custom_message; } } void send_message(const std::string & type, void* data) { h2o_custom_res_message_t* message = new h2o_custom_res_message_t{{{nullptr, nullptr}}, &message_handlers, type, data}; h2o_multithread_send_message(message_receiver, &message->super); } void on(const std::string & message, bool (*handler)(void*)) { message_handlers.emplace(message, handler); } }; struct async_stream_response_t { std::vector<std::string> 
response_chunks; std::mutex mutex; std::condition_variable cv; bool ready = false; };
18,757
C++
.h
454
32.971366
133
0.592489
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,843
image_processor.h
typesense_typesense/include/image_processor.h
#pragma once #include <mutex> #include <vector> #include <core/session/onnxruntime_cxx_api.h> #include "string_utils.h" #include "option.h" // processed_image_t is 4D vector of floats using processed_image_t = std::vector<float>; class ImageProcessor { public: virtual ~ImageProcessor() = default; virtual Option<processed_image_t> process_image(const std::string& image) = 0; }; class CLIPImageProcessor : public ImageProcessor { private: Ort::Env env_; std::unique_ptr<Ort::Session> session_; std::mutex mutex_; public: CLIPImageProcessor(const std::string& model_path); Option<processed_image_t> process_image(const std::string& image) override; };
727
C++
.h
22
28.636364
86
0.702857
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,844
filter.h
typesense_typesense/include/filter.h
#pragma once #include <string> #include <map> #include "tsl/htrie_map.h" #include "json.hpp" #include "store.h" constexpr uint32_t COMPUTE_FILTER_ITERATOR_THRESHOLD = 25'000; constexpr size_t DEFAULT_FILTER_BY_CANDIDATES = 4; enum NUM_COMPARATOR { LESS_THAN, LESS_THAN_EQUALS, EQUALS, NOT_EQUALS, CONTAINS, GREATER_THAN, GREATER_THAN_EQUALS, RANGE_INCLUSIVE }; enum FILTER_OPERATOR { AND, OR }; struct filter_node_t; struct field; struct filter { std::string field_name; std::vector<std::string> values; std::vector<NUM_COMPARATOR> comparators; // Would be set when `field: != ...` is encountered with id/string field or `field: != [ ... ]` is encountered in the // case of int and float fields. During filtering, all the results of matching the field against the values are // aggregated and then this flag is checked if negation on the aggregated result is required. bool apply_not_equals = false; // Would store `Foo` in case of a filter expression like `$Foo(bar := baz)` std::string referenced_collection_name; std::vector<nlohmann::json> params; /// For searching places within a given radius of a given latlong (mi for miles and km for kilometers) static constexpr const char* GEO_FILTER_RADIUS_KEY = "radius"; /// Radius threshold beyond which exact filtering on geo_result_ids will not be done. 
static constexpr const char* EXACT_GEO_FILTER_RADIUS_KEY = "exact_filter_radius"; static constexpr double DEFAULT_EXACT_GEO_FILTER_RADIUS_VALUE = 10000; // meters static const std::string RANGE_OPERATOR() { return ".."; } static Option<bool> validate_numerical_filter_value(field _field, const std::string& raw_value); static Option<NUM_COMPARATOR> extract_num_comparator(std::string & comp_and_value); static Option<bool> parse_geopoint_filter_value(std::string& raw_value, const std::string& format_err_msg, std::string& processed_filter_val, NUM_COMPARATOR& num_comparator); static Option<bool> parse_geopoint_filter_value(std::string& raw_value, const std::string& format_err_msg, filter& filter_exp); static Option<bool> parse_filter_query(const std::string& filter_query, const tsl::htrie_map<char, field>& search_schema, const Store* store, const std::string& doc_id_prefix, filter_node_t*& root); }; struct filter_node_t { filter filter_exp; FILTER_OPERATOR filter_operator = AND; bool isOperator = false; filter_node_t* left = nullptr; filter_node_t* right = nullptr; std::string filter_query; filter_node_t() = default; explicit filter_node_t(filter filter_exp) : filter_exp(std::move(filter_exp)), isOperator(false), left(nullptr), right(nullptr) {} filter_node_t(FILTER_OPERATOR filter_operator, filter_node_t* left, filter_node_t* right) : filter_operator(filter_operator), isOperator(true), left(left), right(right) {} ~filter_node_t() { delete left; delete right; } filter_node_t& operator=(filter_node_t&& obj) noexcept { if (&obj == this) { return *this; } if (obj.isOperator) { isOperator = true; filter_operator = obj.filter_operator; left = obj.left; right = obj.right; obj.left = nullptr; obj.right = nullptr; } else { isOperator = false; filter_exp = obj.filter_exp; } return *this; } };
4,075
C++
.h
100
30.14
121
0.586329
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,845
raft_server.h
typesense_typesense/include/raft_server.h
#pragma once #include <brpc/controller.h> // brpc::Controller #include <brpc/server.h> // brpc::Server #include <braft/raft.h> // braft::Node braft::StateMachine #include <braft/storage.h> // braft::SnapshotWriter #include <braft/util.h> // braft::AsyncClosureGuard #include <braft/protobuf_file.h> // braft::ProtoBufFile #include <rocksdb/db.h> #include <future> #include "http_data.h" #include "threadpool.h" #include "http_server.h" #include "batched_indexer.h" #include "cached_resource_stat.h" class Store; class ReplicationState; // Implements the callback for the state machine class ReplicationClosure : public braft::Closure { private: const std::shared_ptr<http_req> request; const std::shared_ptr<http_res> response; public: ReplicationClosure(const std::shared_ptr<http_req>& request, const std::shared_ptr<http_res>& response): request(request), response(response) { } ~ReplicationClosure() { //LOG(INFO) << "~ReplicationClosure req use count " << request.use_count(); } const std::shared_ptr<http_req>& get_request() const { return request; } const std::shared_ptr<http_res>& get_response() const { return response; } void Run(); }; // Closure that fires when refresh nodes operation finishes class RefreshNodesClosure : public braft::Closure { public: RefreshNodesClosure() {} ~RefreshNodesClosure() {} void Run() { // Auto delete this after Run() std::unique_ptr<RefreshNodesClosure> self_guard(this); if(status().ok()) { LOG(INFO) << "Peer refresh succeeded!"; } else { LOG(ERROR) << "Peer refresh failed, error: " << status().error_str(); } } }; // Closure that fires when requested class OnDemandSnapshotClosure : public braft::Closure { private: ReplicationState* replication_state; const std::shared_ptr<http_req> req; const std::shared_ptr<http_res> res; public: OnDemandSnapshotClosure(ReplicationState *replication_state, const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) : replication_state(replication_state), req(req), res(res) {} 
~OnDemandSnapshotClosure() {} void Run(); }; class TimedSnapshotClosure : public braft::Closure { private: ReplicationState* replication_state; public: TimedSnapshotClosure(ReplicationState *replication_state) : replication_state(replication_state){} ~TimedSnapshotClosure() {} void Run(); }; // Implements braft::StateMachine. class ReplicationState : public braft::StateMachine { private: static constexpr const char* db_snapshot_name = "db_snapshot"; static constexpr const char* analytics_db_snapshot_name = "analytics_db_snapshot"; static constexpr const char* BATCHED_INDEXER_STATE_KEY = "$BI"; mutable std::shared_mutex node_mutex; braft::Node* volatile node; butil::atomic<int64_t> leader_term; HttpServer* server; BatchedIndexer* batched_indexer; Store* store; Store* analytics_store; ThreadPool* thread_pool; http_message_dispatcher* message_dispatcher; const bool api_uses_ssl; const Config* config; const size_t num_collections_parallel_load; const size_t num_documents_parallel_load; std::atomic<bool> read_caught_up; std::atomic<bool> write_caught_up; std::string raft_dir_path; std::string ext_snapshot_path; std::atomic<bool> ext_snapshot_succeeded; int election_timeout_interval_ms; std::mutex mcv; std::condition_variable cv; bool ready; std::atomic<bool> shutting_down; std::atomic<size_t> pending_writes; std::atomic<size_t> snapshot_in_progress; const uint64_t snapshot_interval_s; // frequency of actual snapshotting uint64_t last_snapshot_ts; // when last snapshot ran butil::EndPoint peering_endpoint; public: static constexpr const char* log_dir_name = "log"; static constexpr const char* meta_dir_name = "meta"; static constexpr const char* snapshot_dir_name = "snapshot"; ReplicationState(HttpServer* server, BatchedIndexer* batched_indexer, Store* store, Store* analytics_store, ThreadPool* thread_pool, http_message_dispatcher* message_dispatcher, bool api_uses_ssl, const Config* config, size_t num_collections_parallel_load, size_t num_documents_parallel_load); // 
Starts this node int start(const butil::EndPoint & peering_endpoint, int api_port, int election_timeout_ms, int snapshot_max_byte_count_per_rpc, const std::string & raft_dir, const std::string & nodes, const std::atomic<bool>& quit_abruptly); // Generic write method for synchronizing all writes void write(const std::shared_ptr<http_req>& request, const std::shared_ptr<http_res>& response); // Generic read method for consistent reads, not used for now void read(const std::shared_ptr<http_res>& response); // updates cluster membership void refresh_nodes(const std::string & nodes, const size_t raft_counter, const std::atomic<bool>& reset_peers_on_error); void refresh_catchup_status(bool log_msg); bool trigger_vote(); bool reset_peers(); bool has_leader_term() const { return leader_term.load(butil::memory_order_acquire) > 0; } bool is_read_caught_up() const { return read_caught_up; } bool is_write_caught_up() const { return write_caught_up; } bool is_alive() const; uint64_t node_state() const; // Shut this node down. 
void shutdown(); int init_db(); Store* get_store(); // for manual / external snapshots void do_snapshot(const std::string& snapshot_path, const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); static std::string to_nodes_config(const butil::EndPoint &peering_endpoint, const int api_port, const std::string &nodes_config); void set_ext_snapshot_path(const std::string &snapshot_path); bool get_ext_snapshot_succeeded(); const std::string& get_ext_snapshot_path() const; // for timed snapshots void do_snapshot(const std::string& nodes); void persist_applying_index(); http_message_dispatcher* get_message_dispatcher() const; void wait() { auto lk = std::unique_lock<std::mutex>(mcv); cv.wait(lk, [&] { return ready; }); ready = false; } void notify() { std::lock_guard<std::mutex> lk(mcv); ready = true; cv.notify_all(); } static std::string resolve_node_hosts(const std::string& nodes_config); int64_t get_num_queued_writes(); bool is_leader(); nlohmann::json get_status(); std::string get_leader_url() const; static Option<bool> handle_gzip(const std::shared_ptr<http_req>& request); void decr_pending_writes(); private: friend class ReplicationClosure; // actual application of writes onto the WAL void on_apply(braft::Iterator& iter); struct SnapshotArg { ReplicationState* replication_state; braft::SnapshotWriter* writer; std::string state_dir_path; std::string db_snapshot_path; std::string analytics_db_snapshot_path; std::string ext_snapshot_path; braft::Closure* done; }; static void *save_snapshot(void* arg); void on_snapshot_save(braft::SnapshotWriter* writer, braft::Closure* done); int on_snapshot_load(braft::SnapshotReader* reader); void on_leader_start(int64_t term) { leader_term.store(term, butil::memory_order_release); LOG(INFO) << "Node becomes leader, term: " << term; } void on_leader_stop(const butil::Status& status) { leader_term.store(-1, butil::memory_order_release); LOG(INFO) << "Node stepped down : " << status; } void on_shutdown() { LOG(INFO) 
<< "This node is down"; } void on_error(const ::braft::Error& e) { LOG(ERROR) << "Met peering error " << e; } void on_configuration_committed(const ::braft::Configuration& conf) { LOG(INFO) << "Configuration of this group is " << conf; } void on_start_following(const ::braft::LeaderChangeContext& ctx) { refresh_catchup_status(true); LOG(INFO) << "Node starts following " << ctx; } void on_stop_following(const ::braft::LeaderChangeContext& ctx) { LOG(INFO) << "Node stops following " << ctx; } void write_to_leader(const std::shared_ptr<http_req>& request, const std::shared_ptr<http_res>& response); void do_dummy_write(); std::string get_node_url_path(const std::string& node_addr, const std::string& path, const std::string& protocol) const; };
9,004
C++
.h
216
35.388889
147
0.666398
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,846
or_iterator.h
typesense_typesense/include/or_iterator.h
#pragma once #include <vector> #include <filter_result_iterator.h> #include "posting_list.h" /* * Takes a list of posting list iterators and returns an unique OR sequence of elements lazily */ class or_iterator_t { private: std::vector<posting_list_t::iterator_t> its; int curr_index = 0; void advance_smallest(); public: explicit or_iterator_t(std::vector<posting_list_t::iterator_t>& its); or_iterator_t(or_iterator_t&& rhs) noexcept; or_iterator_t& operator=(or_iterator_t&& rhs) noexcept; ~or_iterator_t() noexcept; // utility methods for manipulating groups of iterators static bool at_end(const std::vector<or_iterator_t>& its); static bool at_end2(const std::vector<or_iterator_t>& its); static bool equals(std::vector<or_iterator_t>& its); static bool equals2(std::vector<or_iterator_t>& its); static void advance_all(std::vector<or_iterator_t>& its); static void advance_all2(std::vector<or_iterator_t>& its); static void advance_non_largest(std::vector<or_iterator_t>& its); static void advance_non_largest2(std::vector<or_iterator_t>& its); // actual iterator operations [[nodiscard]] bool valid() const; bool next(); bool skip_to(uint32_t id); [[nodiscard]] uint32_t id() const; [[nodiscard]] const std::vector<posting_list_t::iterator_t>& get_its() const; static bool take_id(result_iter_state_t& istate, uint32_t id, bool& is_excluded); static bool take_id(result_iter_state_t& istate, uint32_t id, bool& is_excluded, single_filter_result_t& filter_result); template<class T> static bool intersect(std::vector<or_iterator_t>& its, result_iter_state_t& istate, T func); static bool contains_atleast_one(std::vector<or_iterator_t>& its, result_iter_state_t&& istate); }; template<class T> bool or_iterator_t::intersect(std::vector<or_iterator_t>& its, result_iter_state_t& istate, T func) { size_t it_size = its.size(); bool is_excluded; size_t num_processed = 0; switch (its.size()) { case 0: break; case 1: if(istate.is_filter_provided() && istate.is_filter_valid()) { 
its[0].skip_to(istate.get_filter_id()); } while(its.size() == it_size && its[0].valid()) { num_processed++; if (num_processed % 65536 == 0 && (std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::system_clock::now().time_since_epoch()).count() - search_begin_us) > search_stop_us) { search_cutoff = true; break; } auto id = its[0].id(); istate.num_keyword_matches++; single_filter_result_t filter_result; if(take_id(istate, id, is_excluded, filter_result)) { func(filter_result, its); } if(istate.is_filter_provided() && !is_excluded) { if(istate.is_filter_valid()) { // skip iterator till next id available in filter its[0].skip_to(istate.get_filter_id()); } else { break; } } else { its[0].next(); } } break; case 2: if(istate.is_filter_provided() && istate.is_filter_valid()) { its[0].skip_to(istate.get_filter_id()); its[1].skip_to(istate.get_filter_id()); } while(its.size() == it_size && !at_end2(its)) { num_processed++; if (num_processed % 65536 == 0 && (std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::system_clock::now().time_since_epoch()).count() - search_begin_us) > search_stop_us) { search_cutoff = true; break; } if(equals2(its)) { auto id = its[0].id(); istate.num_keyword_matches++; single_filter_result_t filter_result; if(take_id(istate, id, is_excluded, filter_result)) { func(filter_result, its); } if(istate.is_filter_provided() != 0 && !is_excluded) { if(istate.is_filter_valid()) { // skip iterator till next id available in filter its[0].skip_to(istate.get_filter_id()); its[1].skip_to(istate.get_filter_id()); } else { break; } } else { advance_all2(its); } } else { advance_non_largest2(its); } } break; default: if(istate.is_filter_provided() && istate.is_filter_valid()) { for(auto& it: its) { it.skip_to(istate.get_filter_id()); } } while(its.size() == it_size && !at_end(its)) { num_processed++; if (num_processed % 65536 == 0 && (std::chrono::duration_cast<std::chrono::microseconds>( 
std::chrono::system_clock::now().time_since_epoch()).count() - search_begin_us) > search_stop_us) { search_cutoff = true; break; } if(equals(its)) { auto id = its[0].id(); istate.num_keyword_matches++; single_filter_result_t filter_result; if(take_id(istate, id, is_excluded, filter_result)) { func(filter_result, its); } if(istate.is_filter_provided() && !is_excluded) { if(istate.is_filter_valid()) { // skip iterator till next id available in filter for(auto& it: its) { it.skip_to(istate.get_filter_id()); } } else { break; } } else { advance_all(its); } } else { advance_non_largest(its); } } } return true; }
6,629
C++
.h
150
29.346667
119
0.492091
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,847
personalization_model.h
typesense_typesense/include/personalization_model.h
#pragma once #include <string> #include <vector> #include <map> #include "embedder_manager.h" #include <json.hpp> class PersonalizationModel { public: static inline const std::map<std::string, std::vector<std::string>> valid_model_names = { {"recommendation", {"tyrec-1"}}, {"search", {"tyrec-2"}} }; PersonalizationModel(const std::string& model_path); ~PersonalizationModel(); static std::string get_model_subdir(const std::string& model_id); static Option<bool> validate_model(const nlohmann::json& model_json); static Option<bool> create_model(const std::string& model_id, const nlohmann::json& model_json, const std::string model_data); static Option<bool> update_model(const std::string& model_id, const nlohmann::json& model_json, const std::string model_data); static Option<bool> delete_model(const std::string& model_id); private: std::string model_path_; std::string model_id_; };
959
C++
.h
23
37.782609
130
0.712594
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,849
system_metrics.h
typesense_typesense/include/system_metrics.h
#include <string> #include <fstream> #include <sstream> #include <thread> #include <sys/stat.h> #include "json.hpp" const int NUM_CPU_STATES = 10; const int NUM_NETWORK_STATS = 16; struct cpu_data_t { std::string cpu; size_t times[NUM_CPU_STATES]; }; enum CPUStates { S_USER = 0, S_NICE, S_SYSTEM, S_IDLE, S_IOWAIT, S_IRQ, S_SOFTIRQ, S_STEAL, S_GUEST, S_GUEST_NICE }; struct cpu_stat_t { std::string active; std::string idle; }; class SystemMetrics { private: const static uint64_t NON_PROC_MEM_UPDATE_INTERVAL_SECONDS = 60; static uint64_t non_proc_mem_last_access; static uint64_t non_proc_mem_bytes; size_t _get_idle_time(const cpu_data_t &e) { // we will consider iowait as cpu being idle return e.times[S_IDLE] + e.times[S_IOWAIT]; } size_t get_total_time(const cpu_data_t &e) { return e.times[S_USER] + e.times[S_NICE] + e.times[S_SYSTEM] + e.times[S_IDLE] + e.times[S_IOWAIT] + e.times[S_IRQ] + e.times[S_SOFTIRQ] + e.times[S_STEAL]; } // https://stackoverflow.com/a/52173118/131050 size_t get_active_time(const cpu_data_t &e) { return get_total_time(e) - _get_idle_time(e); } std::vector<cpu_stat_t> compute_cpu_stats(const std::vector<cpu_data_t>& cpu_data_prev, const std::vector<cpu_data_t>& cpu_data_now) { std::vector<cpu_stat_t> stats; const size_t NUM_ENTRIES = cpu_data_prev.size(); for (size_t i = 0; i < NUM_ENTRIES; ++i) { const cpu_data_t &prev = cpu_data_prev[i]; const cpu_data_t &now = cpu_data_now[i]; auto prev_active = get_active_time(prev); auto now_active = get_active_time(now); auto prev_total = get_total_time(prev); auto now_total = get_total_time(now); auto total_diff = float(now_total - prev_total); auto active_diff = float(now_active - prev_active); // take care to avoid division by zero! float active_percentage = (now_total == prev_total) ? 
0 : ((active_diff / total_diff) * 100); float idle_percentage = 100 - active_percentage; cpu_stat_t stat; stat.active = format_dp(active_percentage); stat.idle = format_dp(idle_percentage); stats.push_back(stat); } return stats; } std::string format_dp(float value) const { std::stringstream active_ss; active_ss.setf(std::ios::fixed, std::ios::floatfield); active_ss.precision(2); active_ss << value; return active_ss.str(); } void read_cpu_data(std::vector<cpu_data_t> &entries) { std::ifstream stat_file("/proc/stat"); std::string line; const std::string STR_CPU("cpu"); const std::string STR_TOT("tot"); while (std::getline(stat_file, line)) { // cpu stats line found if (!line.compare(0, STR_CPU.size(), STR_CPU)) { std::istringstream ss(line); // store entry entries.emplace_back(cpu_data_t()); cpu_data_t &entry = entries.back(); // read cpu label ss >> entry.cpu; if (entry.cpu.size() > STR_CPU.size()) { entry.cpu.erase(0, STR_CPU.size()); } else { entry.cpu = STR_TOT; } // read times for (int i = 0; i < NUM_CPU_STATES; ++i) { ss >> entry.times[i]; } } } } static uint64_t get_memory_total_bytes(); static uint64_t get_memory_used_bytes(); static uint64_t linux_get_mem_available_bytes(); static uint64_t get_memory_non_proc_bytes(); public: SystemMetrics() { non_proc_mem_last_access = std::chrono::duration_cast<std::chrono::seconds>( std::chrono::system_clock::now().time_since_epoch()).count(); uint64_t memory_used_bytes = get_memory_used_bytes(); non_proc_mem_bytes = memory_used_bytes - get_memory_active_bytes(); } static uint64_t get_memory_active_bytes(); static void linux_get_network_data(const std::string & stat_path, uint64_t& received_bytes, uint64_t& sent_bytes); void get(const std::string & data_dir_path, nlohmann::json& result); static float used_memory_ratio(); std::vector<cpu_stat_t> get_cpu_stats() { // snapshot 1 std::vector<cpu_data_t> cpu_data_prev; read_cpu_data(cpu_data_prev); // 100ms pause std::this_thread::sleep_for(std::chrono::milliseconds(100)); // 
snapshot 2 std::vector<cpu_data_t> cpu_data_now; read_cpu_data(cpu_data_now); // compute return compute_cpu_stats(cpu_data_prev, cpu_data_now); } static uint64_t get_memory_free_bytes() { return get_memory_total_bytes() - get_memory_used_bytes(); } };
5,170
C++
.h
139
28.007194
118
0.56662
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,851
core_api.h
typesense_typesense/include/core_api.h
#pragma once #include "http_server.h" #include "auth_manager.h" #include "ratelimit_manager.h" bool handle_authentication(std::map<std::string, std::string>& req_params, std::vector<nlohmann::json>& embedded_params_vec, const std::string& body, const route_path& rpath, const std::string& req_auth_key); bool get_alter_in_progress(const std::string& collection); // Collections bool get_collections(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool post_create_collection(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool patch_update_collection(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool del_drop_collection(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool get_collection_summary(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); // Documents bool get_search(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool post_multi_search(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool get_export_documents(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool post_add_document(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool patch_update_document(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool patch_update_documents(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool post_import_documents(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool get_fetch_document(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool del_remove_document(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool del_remove_documents(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); // Alias bool get_alias(const std::shared_ptr<http_req>& req, const 
std::shared_ptr<http_res>& res); bool get_aliases(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool put_upsert_alias(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool del_alias(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); // Presets bool get_presets(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool get_preset(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool put_upsert_preset(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool del_preset(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); //stopwords bool get_stopwords(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool get_stopword(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool put_upsert_stopword(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool del_stopword(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); // Overrides bool get_overrides(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool get_override(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool put_override(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool del_override(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); // Synonyms bool get_synonyms(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool get_synonym(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool put_synonym(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool del_synonym(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); // Keys bool get_keys(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool 
post_create_key(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool get_key(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool del_key(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); // Health + Metrics bool get_debug(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool get_health(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool get_health_with_resource_usage(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool post_health(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool get_metrics_json(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool get_stats_json(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool get_status(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); // operations bool post_snapshot(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool post_vote(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool post_config(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool post_clear_cache(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool post_compact_db(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool post_reset_peers(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); // Rate Limiting bool get_rate_limits(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool get_rate_limit(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool put_rate_limit(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool del_rate_limit(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool post_rate_limit(const 
std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool get_active_throttles(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool get_limit_exceed_counts(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool del_throttle(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool del_exceed(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); // Analytics bool post_create_event(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool get_analytics_rules(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool get_analytics_rule(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool post_create_analytics_rules(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool put_upsert_analytics_rules(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool del_analytics_rules(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool post_write_analytics_to_db(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); // Misc helpers void get_collections_for_auth(std::map<std::string, std::string>& req_params, const std::string& body, const route_path& rpath, const std::string& req_auth_key, std::vector<collection_key_t>& collections, std::vector<nlohmann::json>& embedded_params_vec); bool is_doc_import_route(uint64_t route_hash); bool is_coll_create_route(uint64_t route_hash); bool is_drop_collection_route(uint64_t route_hash); bool is_doc_write_route(uint64_t route_hash); bool is_doc_del_route(uint64_t route_hash); Option<std::pair<std::string,std::string>> get_api_key_and_ip(const std::string& metadata); void init_api(uint32_t cache_num_entries); bool post_proxy(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool post_conversation_model(const 
std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool get_conversation_model(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool del_conversation_model(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool get_conversation_models(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool put_conversation_model(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool post_personalization_model(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool get_personalization_model(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool del_personalization_model(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool get_personalization_models(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); bool put_personalization_model(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res);
9,283
C++
.h
112
79.401786
112
0.727714
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,853
id_list.h
typesense_typesense/include/id_list.h
#pragma once #include <map> #include <unordered_map> #include "sorted_array.h" typedef uint32_t last_id_t; /* Compressed chain of blocks that store the document IDs and offsets of a given token. Offsets of singular and multi-valued fields are encoded differently. */ class id_list_t { public: // A block stores a sorted list of Document IDs compactly struct block_t { sorted_array ids; // link to next block block_t* next = nullptr; bool contains(uint32_t id); uint32_t upsert(uint32_t id); uint32_t erase(uint32_t id); uint32_t size() { return ids.getLength(); } }; class iterator_t { private: block_t* curr_block; int64_t curr_index; block_t* end_block; std::map<last_id_t, block_t*>* id_block_map; bool reverse; public: // uncompressed data structure for performance uint32_t* ids = nullptr; explicit iterator_t(block_t* start, block_t* end, std::map<last_id_t, block_t*>* id_block_map, bool reverse); iterator_t(iterator_t&& rhs) noexcept; ~iterator_t(); iterator_t& operator=(iterator_t&& obj) noexcept; [[nodiscard]] bool valid() const; void next(); void previous(); [[nodiscard]] uint32_t last_block_id() const; void skip_n(uint32_t n); void skip_to(uint32_t id); void reset_cache(); [[nodiscard]] uint32_t id() const; [[nodiscard]] inline uint32_t index() const; [[nodiscard]] inline block_t* block() const; }; struct result_iter_state_t { const uint32_t* excluded_result_ids = nullptr; const size_t excluded_result_ids_size = 0; const uint32_t* filter_ids = nullptr; const size_t filter_ids_length = 0; size_t excluded_result_ids_index = 0; size_t filter_ids_index = 0; size_t index = 0; result_iter_state_t() = default; result_iter_state_t(uint32_t* excluded_result_ids, size_t excluded_result_ids_size, const uint32_t* filter_ids, const size_t filter_ids_length) : excluded_result_ids(excluded_result_ids), excluded_result_ids_size(excluded_result_ids_size), filter_ids(filter_ids), filter_ids_length(filter_ids_length) {} }; private: // maximum number of IDs (and associated offsets) to 
store in each block before another block is created const uint16_t BLOCK_MAX_ELEMENTS; uint32_t ids_length = 0; block_t root_block; // keeps track of the *last* ID in each block and is used for partial random access // e.g. 0..[9], 10..[19], 20..[29] // MUST be ordered std::map<last_id_t, block_t*> id_block_map; static bool at_end(const std::vector<id_list_t::iterator_t>& its); static bool at_end2(const std::vector<id_list_t::iterator_t>& its); static bool equals(std::vector<id_list_t::iterator_t>& its); static bool equals2(std::vector<id_list_t::iterator_t>& its); static void advance_all(std::vector<id_list_t::iterator_t>& its); static void advance_all2(std::vector<id_list_t::iterator_t>& its); static void advance_non_largest(std::vector<id_list_t::iterator_t>& its); static void advance_non_largest2(std::vector<id_list_t::iterator_t>& its); static uint32_t advance_smallest(std::vector<id_list_t::iterator_t>& its); static uint32_t advance_smallest2(std::vector<id_list_t::iterator_t>& its); public: explicit id_list_t(uint16_t max_block_elements); ~id_list_t(); static void split_block(block_t* src_block, block_t* dst_block); static void merge_adjacent_blocks(block_t* block1, block_t* block2, size_t num_block2_ids_to_move); void upsert(uint32_t id); void erase(uint32_t id); block_t* get_root(); size_t num_blocks() const; size_t num_ids() const; uint32_t first_id(); uint32_t last_id(); block_t* block_of(uint32_t id); bool contains(uint32_t id); bool contains_atleast_one(const uint32_t* target_ids, size_t target_ids_size); iterator_t new_iterator(block_t* start_block = nullptr, block_t* end_block = nullptr); iterator_t new_rev_iterator(); static void merge(const std::vector<id_list_t*>& id_lists, std::vector<uint32_t>& result_ids); static void intersect(const std::vector<id_list_t*>& id_lists, std::vector<uint32_t>& result_ids); static bool take_id(result_iter_state_t& istate, uint32_t id); template<class T> static bool block_intersect( 
std::vector<id_list_t::iterator_t>& its, result_iter_state_t& istate, T func ); uint32_t* uncompress(); void uncompress(std::vector<uint32_t>& data); size_t intersect_count(const uint32_t* res_ids, size_t res_ids_len, bool estimate_facets, size_t facet_sample_interval); }; template<class T> bool id_list_t::block_intersect(std::vector<id_list_t::iterator_t>& its, result_iter_state_t& istate, T func) { switch (its.size()) { case 0: break; case 1: while(its[0].valid()) { if(id_list_t::take_id(istate, its[0].id())) { func(its[0].id(), its, istate.index); } its[0].next(); } break; case 2: while(!at_end2(its)) { if(equals2(its)) { if(id_list_t::take_id(istate, its[0].id())) { func(its[0].id(), its, istate.index); } advance_all2(its); } else { advance_non_largest2(its); } } break; default: while(!at_end(its)) { if(equals(its)) { //LOG(INFO) << its[0].id(); if(id_list_t::take_id(istate, its[0].id())) { func(its[0].id(), its, istate.index); } advance_all(its); } else { advance_non_largest(its); } } } return false; }
6,392
C++
.h
152
31.815789
153
0.575287
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,854
tokenizer.h
typesense_typesense/include/tokenizer.h
#pragma once #include <string> #include <vector> #include <iconv.h> #include <unicode/brkiter.h> #include <unicode/normalizer2.h> #include <unicode/translit.h> #include "japanese_localizer.h" #include "logger.h" #include "stemmer_manager.h" class Tokenizer { private: std::string text; size_t i; const bool normalize; const bool no_op; size_t token_counter = 0; iconv_t cd; static const size_t INDEX = 0; static const size_t SEPARATE = 1; static const size_t SKIP = 2; uint8_t index_symbols[256] = {}; uint8_t separator_symbols[256] = {}; std::string out; std::string locale; icu::BreakIterator* bi = nullptr; icu::UnicodeString unicode_text; // tracks start of a text segment that can span multiple unicode tokens due to use of custom symbols int32_t utf8_start_index = 0; // tracks current unicode segment for text extraction int32_t start_pos = 0; int32_t end_pos = 0; char* normalized_text = nullptr; // non-deletable singletons const icu::Normalizer2* nfkd = nullptr; const icu::Normalizer2* nfkc = nullptr; icu::Transliterator* transliterator = nullptr; std::shared_ptr<Stemmer> stemmer = nullptr; inline size_t get_stream_mode(char c) { return (std::isalnum(c) || index_symbols[uint8_t(c)] == 1) ? INDEX : ( (c == ' ' || c == '\n' || separator_symbols[uint8_t(c)] == 1) ? 
SEPARATE : SKIP ); } public: explicit Tokenizer(const std::string& input, bool normalize=true, bool no_op=false, const std::string& locale = "", const std::vector<char>& symbols_to_index = {}, const std::vector<char>& separators = {}, std::shared_ptr<Stemmer> stemmer = nullptr); ~Tokenizer() { iconv_close(cd); free(normalized_text); delete bi; delete transliterator; } void init(const std::string& input); bool next(std::string& token, size_t& token_index, size_t& start_index, size_t& end_index); bool next(std::string& token, size_t& token_index); void tokenize(std::vector<std::string>& tokens); bool tokenize(std::string& token); static bool is_cyrillic(const std::string& locale); static inline bool is_ascii_char(char c) { return (c & ~0x7f) == 0; } static bool belongs_to_general_punctuation_unicode_block(UChar c); void decr_token_counter(); bool should_skip_char(char c); static std::string normalize_ascii_no_spaces(const std::string& text); static bool has_word_tokenizer(const std::string& locale); };
2,682
C++
.h
71
31.323944
104
0.641889
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,855
match_score.h
typesense_typesense/include/match_score.h
#pragma once #include <cstdint> #include <vector> #include <queue> #include <algorithm> #include <cstdlib> #include <limits> #include "logger.h" const size_t WINDOW_SIZE = 10; const uint16_t MAX_DISPLACEMENT = std::numeric_limits<uint16_t>::max(); struct token_positions_t { bool last_token = false; std::vector<uint16_t> positions; }; struct TokenOffset { uint8_t token_id; // token identifier uint16_t offset = MAX_DISPLACEMENT; // token's offset in the text uint32_t offset_index; // index of the offset in the offset vector bool operator()(const TokenOffset &a, const TokenOffset &b) { return a.offset > b.offset; } bool operator>(const TokenOffset &a) const { return offset > a.offset; } bool operator<(const TokenOffset &a) const { return offset < a.offset; } }; struct Match { uint8_t words_present = 0; uint8_t distance = 0; uint8_t max_offset = 0; uint8_t exact_match = 0; std::vector<TokenOffset> offsets; Match() : words_present(0), distance(0), max_offset(0), exact_match(0) { } Match(uint8_t words_present, uint8_t distance, uint8_t max_offset, uint8_t exact_match = 0) : words_present(words_present), distance(distance), max_offset(max_offset), exact_match(exact_match) { } // Construct a single match score from individual components (for multi-field sort) inline uint64_t get_match_score(const uint32_t total_cost, const uint32_t unique_words) const { uint64_t match_score = ( (int64_t(unique_words) << 40) | (int64_t(words_present) << 32) | (int64_t(255 - total_cost) << 24) | (int64_t(100 - distance) << 16) | (int64_t(exact_match) << 8) | (int64_t(255 - max_offset) << 0) ); return match_score; } static void print_token_offsets(std::vector<std::vector<uint16_t>> &token_offsets) { for (auto offsets: token_offsets) { for (auto offset: offsets) { LOG(INFO) << offset << ", "; } LOG(INFO) << ""; } } template<typename T> void sort2(std::vector<T>& a) { if(a[0] < a[1]) { std::swap(a[0], a[1]); } } template<typename T> void sort3(std::vector<T>& a) { if (a[0] > a[1]) { if (a[1] > 
a[2]) { return; } else if (a[0] > a[2]) { std::swap(a[1], a[2]); } else { T tmp = std::move(a[0]); a[0] = std::move(a[2]); a[2] = std::move(a[1]); a[1] = std::move(tmp); } } else { if (a[0] > a[2]) { std::swap(a[0], a[1]); } else if (a[2] > a[1]) { std::swap(a[0], a[2]); } else { T tmp = std::move(a[0]); a[0] = std::move(a[1]); a[1] = std::move(a[2]); a[2] = std::move(tmp); } } } /* Given *sorted offsets* of each target token in a *single* document (token_offsets), generates a score indicating: a) How many tokens are present within a match window b) The proximity between the tokens within the match window How it works: ------------ Create vector with first offset from each token. Sort vector descending. Calculate distance, use only tokens within max window size from lowest offset. Reassign best window and distance if found. Pop end of vector (smallest offset). Push to vector next offset of token just popped. Until queue size is 1. */ Match(uint32_t doc_id, const std::vector<token_positions_t>& token_offsets, bool populate_window=true, bool check_exact_match=false) { // in case if number of tokens in query is greater than max window const size_t tokens_size = std::min(token_offsets.size(), WINDOW_SIZE); std::vector<TokenOffset> window(tokens_size); for (size_t token_id = 0; token_id < tokens_size; token_id++) { window[token_id] = TokenOffset{static_cast<uint8_t>(token_id), token_offsets[token_id].positions[0], 0}; } std::vector<TokenOffset> best_window; if(populate_window) { best_window = window; } size_t best_num_match = 1; size_t best_displacement = MAX_DISPLACEMENT; int prev_min_offset = -1; while (window.size() > 1) { switch(window.size()) { case 2: sort2<TokenOffset>(window); break; case 3: sort3<TokenOffset>(window); break; default: // descending comparator std::sort(window.begin(), window.end(), std::greater<TokenOffset>()); } size_t min_offset = window.back().offset; if(int(min_offset) < prev_min_offset) { // indicates that one of the offsets are wrapping 
around (e.g. long document) break; } prev_min_offset = min_offset; size_t this_displacement = 0; size_t this_num_match = 0; std::vector<TokenOffset> this_window(tokens_size); uint16_t prev_offset = window[0].offset; bool all_offsets_are_same = true; for (size_t i = 0; i < window.size(); i++) { if(populate_window) { this_window[window[i].token_id] = window[i]; this_window[window[i].token_id].offset = MAX_DISPLACEMENT; } if ((window[i].offset - min_offset) <= WINDOW_SIZE) { uint16_t next_offset = (i == window.size() - 1) ? window[i].offset : window[i + 1].offset; this_displacement += window[i].offset - next_offset; this_num_match++; if(populate_window) { this_window[window[i].token_id].offset = window[i].offset; } } all_offsets_are_same = all_offsets_are_same && (window[i].offset == prev_offset); } if ( ((this_num_match > best_num_match) || (this_num_match == best_num_match && this_displacement < best_displacement))) { best_displacement = this_displacement; best_num_match = this_num_match; max_offset = std::min((uint16_t)255, window.front().offset); if(populate_window) { best_window = this_window; } } if (best_num_match == tokens_size && best_displacement == (window.size() - 1)) { // this is the best we can get, so quit early! 
break; } // fill window with next possible smallest offset across available token this_token_offsets const TokenOffset &smallest_offset = window.back(); window.pop_back(); const uint8_t token_id = smallest_offset.token_id; const std::vector<uint16_t>& this_token_offsets = token_offsets[token_id].positions; if (smallest_offset.offset == this_token_offsets.back()) { // no more offsets for this token continue; } // Push next offset of same token popped uint32_t next_offset_index = (smallest_offset.offset_index + 1); TokenOffset token_offset{token_id, this_token_offsets[next_offset_index], next_offset_index}; window.emplace_back(token_offset); } if (best_displacement == MAX_DISPLACEMENT) { best_displacement = 0; } words_present = best_num_match; distance = uint8_t(best_displacement); if(populate_window) { offsets = best_window; } exact_match = 0; if(check_exact_match) { if(distance > token_offsets.size()-1) { // we can exit early and don't have to care about other requirements return; } // 1) distance < num tokens when there are repeating query tokens // 2) distance can be same as num tokens and still not be an exact match int last_token_index = -1; size_t total_offsets = 0; for(const auto& token_positions: token_offsets) { if(token_positions.last_token && !token_positions.positions.empty()) { last_token_index = token_positions.positions.back(); } total_offsets += token_positions.positions.size(); if(total_offsets > token_offsets.size() && distance == token_offsets.size()-1) { // if total offsets exceed query length, there cannot possibly be an exact match return; } } if(last_token_index == int(token_offsets.size())-1) { if(total_offsets == token_offsets.size() && distance == token_offsets.size()-1) { exact_match = 1; } else if(distance < token_offsets.size()-1) { exact_match = 1; } } } } };
9,532
C++
.h
224
30.776786
121
0.532354
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,857
vq_model.h
typesense_typesense/include/vq_model.h
#pragma once #include <string> #include <vector> #include <mutex> #include <shared_mutex> #include <whisper.h> #include "string_utils.h" #include "option.h" class VQModel { protected: int collection_ref_count = 0; std::shared_mutex collection_ref_count_mutex; std::string model_name; public: virtual ~VQModel() = default; virtual Option<std::string> transcribe(const std::string& audio) = 0; void inc_collection_ref_count() { std::unique_lock<std::shared_mutex> lock(collection_ref_count_mutex); collection_ref_count++; } void dec_collection_ref_count() { std::unique_lock<std::shared_mutex> lock(collection_ref_count_mutex); collection_ref_count--; } int get_collection_ref_count() { std::shared_lock<std::shared_mutex> lock(collection_ref_count_mutex); return collection_ref_count; } const std::string& get_model_name() { return model_name; } VQModel(const std::string& model_name) : model_name(model_name) {} }; class WhisperModel : public VQModel { private: whisper_context* ctx = nullptr; whisper_full_params params = whisper_full_default_params(WHISPER_SAMPLING_GREEDY); bool read_wav(const void* data, size_t size, std::vector<float>& pcmf32); std::mutex mutex; public: WhisperModel() = delete; WhisperModel(whisper_context* ctx, const std::string& model_name); static whisper_context* validate_and_load_model(const std::string& model_path); ~WhisperModel(); virtual Option<std::string> transcribe(const std::string& audio_base64) override; };
1,739
C++
.h
46
30.543478
90
0.643576
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,858
personalization_model_manager.h
typesense_typesense/include/personalization_model_manager.h
#pragma once #include <string> #include <unordered_map> #include <shared_mutex> #include <json.hpp> #include "option.h" #include "store.h" #include "personalization_model.h" class PersonalizationModelManager { public: PersonalizationModelManager() = delete; PersonalizationModelManager(const PersonalizationModelManager&) = delete; PersonalizationModelManager(PersonalizationModelManager&&) = delete; PersonalizationModelManager& operator=(const PersonalizationModelManager&) = delete; static Option<nlohmann::json> get_model(const std::string& model_id); static Option<std::string> add_model(nlohmann::json& model,std::string model_id, const bool write_to_disk, const std::string model_data = ""); static Option<nlohmann::json> delete_model(const std::string& model_id); static Option<nlohmann::json> get_all_models(); static Option<nlohmann::json> update_model(const std::string& model_id, nlohmann::json model, const std::string& model_data); static Option<int> init(Store* store); private: static inline std::unordered_map<std::string, nlohmann::json> models; static inline std::shared_mutex models_mutex; static inline Store* store; static constexpr const char* MODEL_KEY_PREFIX = "$PER"; static const std::string get_model_key(const std::string& model_id); };
1,331
C++
.h
27
45.888889
146
0.759045
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,859
tsconfig.h
typesense_typesense/include/tsconfig.h
#pragma once #include <atomic> #include <cmdline.h> #include "option.h" #include "string_utils.h" #include "INIReader.h" #include "json.hpp" class Config { private: std::string data_dir; std::string log_dir; std::string analytics_dir; int32_t analytics_db_ttl = 2419200; //four weeks in secs uint32_t analytics_minute_rate_limit = 5; std::string api_key; // @deprecated std::string search_only_api_key; std::string health_rusage_api_key; std::string api_address; uint32_t api_port; std::string peering_address; uint32_t peering_port; std::string peering_subnet; std::string nodes; std::string master; std::string ssl_certificate; std::string ssl_certificate_key; uint32_t ssl_refresh_interval_seconds; bool enable_cors; std::set<std::string> cors_domains; float max_memory_ratio; int snapshot_interval_seconds; int snapshot_max_byte_count_per_rpc; std::atomic<size_t> healthy_read_lag; std::atomic<size_t> healthy_write_lag; std::string config_file; int config_file_validity; std::atomic<int> log_slow_requests_time_ms; uint32_t num_collections_parallel_load; uint32_t num_documents_parallel_load; uint32_t thread_pool_size; bool enable_access_logging; int disk_used_max_percentage; int memory_used_max_percentage; std::atomic<uint32_t> cache_num_entries = 1000; std::atomic<bool> skip_writes; std::atomic<int> log_slow_searches_time_ms; std::atomic<bool> reset_peers_on_error; bool enable_search_analytics; uint32_t analytics_flush_interval; uint32_t housekeeping_interval; uint32_t db_compaction_interval; bool enable_lazy_filter; bool enable_search_logging; uint32_t max_per_page; uint16_t filter_by_max_ops; protected: Config() { this->api_address = "0.0.0.0"; this->api_port = 8108; this->peering_port = 8107; this->enable_cors = true; this->max_memory_ratio = 1.0f; this->snapshot_interval_seconds = 3600; this->snapshot_max_byte_count_per_rpc = 4194304; this->healthy_read_lag = 1000; this->healthy_write_lag = 500; this->log_slow_requests_time_ms = -1; this->num_collections_parallel_load = 
0; // will be set dynamically if not overridden this->num_documents_parallel_load = 1000; this->cache_num_entries = 1000; this->thread_pool_size = 0; // will be set dynamically if not overridden this->ssl_refresh_interval_seconds = 8 * 60 * 60; this->enable_access_logging = false; this->disk_used_max_percentage = 100; this->memory_used_max_percentage = 100; this->skip_writes = false; this->log_slow_searches_time_ms = 30 * 1000; this->reset_peers_on_error = false; this->enable_search_analytics = false; this->analytics_flush_interval = 3600; // in seconds this->housekeeping_interval = 1800; // in seconds this->db_compaction_interval = 0; // in seconds, disabled this->enable_lazy_filter = false; this->enable_search_logging = false; this->max_per_page = 250; this->filter_by_max_ops = FILTER_BY_DEFAULT_OPERATIONS; } Config(Config const&) { } public: static constexpr uint16_t FILTER_BY_DEFAULT_OPERATIONS = 100; static Config & get_instance() { static Config instance; return instance; } void operator=(Config const&) = delete; // setters void set_data_dir(const std::string & data_dir) { this->data_dir = data_dir; } void set_log_dir(const std::string & log_dir) { this->log_dir = log_dir; } void set_analytics_dir(const std::string& analytics_dir) { this->analytics_dir = analytics_dir; } void set_analytics_db_ttl(int32_t analytics_db_ttl) { this->analytics_db_ttl = analytics_db_ttl; } void set_analytics_minute_rate_limit(int32_t analytics_minute_rate_limit) { this->analytics_minute_rate_limit = analytics_minute_rate_limit; } void set_api_key(const std::string & api_key) { this->api_key = api_key; } // @deprecated void set_search_only_api_key(const std::string & search_only_api_key) { this->search_only_api_key = search_only_api_key; } void set_listen_address(const std::string & listen_address) { this->api_address = listen_address; } void set_listen_port(int listen_port) { this->api_port = listen_port; } void set_master(const std::string & master) { this->master = master; } 
void set_ssl_cert(const std::string & ssl_cert) { this->ssl_certificate = ssl_cert; } void set_ssl_cert_key(const std::string & ssl_cert_key) { this->ssl_certificate_key = ssl_cert_key; } void set_enable_cors(bool enable_cors) { this->enable_cors = enable_cors; } void set_log_slow_requests_time_ms(int log_slow_requests_time_ms) { this->log_slow_requests_time_ms = log_slow_requests_time_ms; } void set_log_slow_searches_time_ms(int log_slow_searches_time_ms) { this->log_slow_searches_time_ms = log_slow_searches_time_ms; } void set_healthy_read_lag(size_t healthy_read_lag) { this->healthy_read_lag = healthy_read_lag; } void set_healthy_write_lag(size_t healthy_write_lag) { this->healthy_write_lag = healthy_write_lag; } void set_cache_num_entries(uint32_t cache_num_entries) { this->cache_num_entries = cache_num_entries; } void set_skip_writes(bool skip_writes) { this->skip_writes = skip_writes; } void set_reset_peers_on_error(bool reset_peers_on_error) { this->reset_peers_on_error = reset_peers_on_error; } void set_max_per_page(int max_per_page) { this->max_per_page = max_per_page; } // getters std::string get_data_dir() const { return this->data_dir; } std::string get_log_dir() const { return this->log_dir; } std::string get_analytics_dir() const { return this->analytics_dir; } int32_t get_analytics_db_ttl() const { return this->analytics_db_ttl; } int32_t get_analytics_minute_rate_limit() const { return this->analytics_minute_rate_limit; } std::string get_api_key() const { return this->api_key; } // @deprecated std::string get_search_only_api_key() const { return this->search_only_api_key; } std::string get_health_rusage_api_key() const { return this->health_rusage_api_key; } std::string get_api_address() const { return this->api_address; } int get_api_port() const { return this->api_port; } std::string get_master() const { return this->master; } std::string get_ssl_cert() const { return this->ssl_certificate; } std::string get_ssl_cert_key() const { return 
this->ssl_certificate_key; } std::string get_config_file() const { return config_file; } bool get_enable_cors() const { return this->enable_cors; } std::set<std::string> get_cors_domains() const { return this->cors_domains; } std::string get_peering_address() const { return this->peering_address; } std::string get_peering_subnet() const { return this->peering_subnet; } int get_peering_port() const { return this->peering_port; } std::string get_nodes() const { return this->nodes; } float get_max_memory_ratio() const { return this->max_memory_ratio; } int get_snapshot_interval_seconds() const { return this->snapshot_interval_seconds; } int get_snapshot_max_byte_count_per_rpc() const { return this->snapshot_max_byte_count_per_rpc; } size_t get_healthy_read_lag() const { return this->healthy_read_lag; } size_t get_healthy_write_lag() const { return this->healthy_write_lag; } int get_log_slow_requests_time_ms() const { return this->log_slow_requests_time_ms; } int get_log_slow_searches_time_ms() const { return this->log_slow_searches_time_ms; } const std::atomic<bool>& get_reset_peers_on_error() const { return reset_peers_on_error; } size_t get_num_collections_parallel_load() const { return this->num_collections_parallel_load; } size_t get_num_documents_parallel_load() const { return this->num_documents_parallel_load; } size_t get_cache_num_entries() const { return this->cache_num_entries; } size_t get_analytics_flush_interval() const { return this->analytics_flush_interval; } size_t get_housekeeping_interval() const { return this->housekeeping_interval; } size_t get_db_compaction_interval() const { return this->db_compaction_interval; } size_t get_thread_pool_size() const { return this->thread_pool_size; } size_t get_ssl_refresh_interval_seconds() const { return this->ssl_refresh_interval_seconds; } bool get_enable_access_logging() const { return this->enable_access_logging; } bool get_enable_search_analytics() const { return this->enable_search_analytics; } bool 
get_enable_search_logging() const { return this->enable_search_logging; } int get_disk_used_max_percentage() const { return this->disk_used_max_percentage; } int get_memory_used_max_percentage() const { return this->memory_used_max_percentage; } std::string get_access_log_path() const { if(this->log_dir.empty()) { return ""; } return this->log_dir + "/typesense-access.log"; } bool get_enable_lazy_filter() const { return enable_lazy_filter; } const std::atomic<bool>& get_skip_writes() const { return skip_writes; } int get_max_per_page() const { return this->max_per_page; } uint16_t get_filter_by_max_ops() const { return filter_by_max_ops; } // loaders std::string get_env(const char *name) { const char *ret = getenv(name); if (!ret) { return std::string(); } return std::string(ret); } void load_config_env(); void load_config_file(cmdline::parser & options); void load_config_cmd_args(cmdline::parser & options); void set_cors_domains(std::string& cors_domains_value) { std::vector<std::string> cors_values_vec; StringUtils::split(cors_domains_value, cors_values_vec, ","); cors_domains.clear(); cors_domains.insert(cors_values_vec.begin(), cors_values_vec.end()); } void set_enable_search_analytics(bool enable_search_analytics) { this->enable_search_analytics = enable_search_analytics; } void set_enable_search_logging(bool enable_search_logging) { this->enable_search_logging = enable_search_logging; } // validation Option<bool> is_valid() { if(this->config_file_validity == -1) { return Option<bool>(500, "Error parsing the configuration file."); } if(data_dir.empty()) { return Option<bool>(500, "Data directory is not specified."); } if(api_key.empty()) { return Option<bool>(500, "API key is not specified."); } return Option<bool>(true); } Option<bool> update_config(const nlohmann::json& req_json); static Option<std::string> fetch_file_contents(const std::string & file_path); static Option<std::string> fetch_nodes_config(const std::string& path_to_nodes); };
11,947
C++
.h
345
27.901449
94
0.638148
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,860
text_embedder_remote.h
typesense_typesense/include/text_embedder_remote.h
#pragma once #include <vector> #include <string> #include <mutex> #include "http_client.h" #include "raft_server.h" #include "option.h" struct embedding_res_t { std::vector<float> embedding; nlohmann::json error = nlohmann::json::object(); int status_code; bool success; embedding_res_t() : success(false) {} embedding_res_t(const std::vector<float>& embedding) : embedding(embedding), success(true) {} embedding_res_t(int status_code, const nlohmann::json& error) : error(error), success(false), status_code(status_code) {} }; class RemoteEmbedder { protected: static Option<bool> validate_string_properties(const nlohmann::json& model_config, const std::vector<std::string>& properties); static inline ReplicationState* raft_server = nullptr; std::shared_mutex mutex; public: static long call_remote_api(const std::string& method, const std::string& url, const std::string& req_body, std::string& res_body, std::map<std::string, std::string>& res_headers, std::unordered_map<std::string, std::string>& req_headers); virtual nlohmann::json get_error_json(const nlohmann::json& req_body, long res_code, const std::string& res_body) = 0; virtual embedding_res_t Embed(const std::string& text, const size_t remote_embedder_timeout_ms = 30000, const size_t remote_embedding_num_tries = 2) = 0; virtual std::vector<embedding_res_t> batch_embed(const std::vector<std::string>& inputs, const size_t remote_embedding_batch_size = 200, const size_t remote_embedding_timeout_ms = 60000, const size_t remote_embedding_num_tries = 2) = 0; static const std::string get_model_key(const nlohmann::json& model_config); static void init(ReplicationState* rs) { raft_server = rs; } virtual ~RemoteEmbedder() = default; virtual bool update_api_key(const std::string& api_key) = 0; }; class OpenAIEmbedder : public RemoteEmbedder { private: std::string api_key; std::string openai_model_path; static constexpr char* OPENAI_CREATE_EMBEDDING = "v1/embeddings"; bool has_custom_dims; size_t num_dims; std::string 
openai_url = "https://api.openai.com"; static std::string get_openai_create_embedding_url(const std::string& openai_url) { return openai_url.back() == '/' ? openai_url + OPENAI_CREATE_EMBEDDING : openai_url + "/" + OPENAI_CREATE_EMBEDDING; } public: OpenAIEmbedder(const std::string& openai_model_path, const std::string& api_key, const size_t num_dims, const bool has_custom_dims, const std::string& openai_url); static Option<bool> is_model_valid(const nlohmann::json& model_config, size_t& num_dims); embedding_res_t Embed(const std::string& text, const size_t remote_embedder_timeout_ms = 30000, const size_t remote_embedding_num_tries = 2) override; std::vector<embedding_res_t> batch_embed(const std::vector<std::string>& inputs, const size_t remote_embedding_batch_size = 200, const size_t remote_embedding_timeout_ms = 60000, const size_t remote_embedding_num_tries = 2) override; nlohmann::json get_error_json(const nlohmann::json& req_body, long res_code, const std::string& res_body) override; static std::string get_model_key(const nlohmann::json& model_config); bool update_api_key(const std::string& apikey) override { std::lock_guard<std::shared_mutex> lock(mutex); api_key = apikey; return true; } }; class GoogleEmbedder : public RemoteEmbedder { private: // only support this model for now inline static const char* SUPPORTED_MODEL = "embedding-gecko-001"; inline static constexpr short GOOGLE_EMBEDDING_DIM = 768; inline static constexpr char* GOOGLE_CREATE_EMBEDDING = "https://generativelanguage.googleapis.com/v1beta2/models/embedding-gecko-001:embedText?key="; std::string google_api_key; public: GoogleEmbedder(const std::string& google_api_key); static Option<bool> is_model_valid(const nlohmann::json& model_config, size_t& num_dims); embedding_res_t Embed(const std::string& text, const size_t remote_embedder_timeout_ms = 30000, const size_t remote_embedding_num_tries = 2) override; std::vector<embedding_res_t> batch_embed(const std::vector<std::string>& inputs, const 
size_t remote_embedding_batch_size = 200, const size_t remote_embedding_timeout_ms = 60000, const size_t remote_embedding_num_tries = 2) override; nlohmann::json get_error_json(const nlohmann::json& req_body, long res_code, const std::string& res_body) override; static std::string get_model_key(const nlohmann::json& model_config); bool update_api_key(const std::string& apikey) override { std::lock_guard<std::shared_mutex> lock(mutex); google_api_key = apikey; return true; } }; class GCPEmbedder : public RemoteEmbedder { private: std::string project_id; std::string access_token; std::string refresh_token; std::string client_id; std::string client_secret; std::string model_name; inline static const std::string GCP_EMBEDDING_BASE_URL = "https://us-central1-aiplatform.googleapis.com/v1/projects/"; inline static const std::string GCP_EMBEDDING_PATH = "/locations/us-central1/publishers/google/models/"; inline static const std::string GCP_EMBEDDING_PREDICT = ":predict"; inline static const std::string GCP_AUTH_TOKEN_URL = "https://oauth2.googleapis.com/token"; static Option<std::string> generate_access_token(const std::string& refresh_token, const std::string& client_id, const std::string& client_secret); static std::string get_gcp_embedding_url(const std::string& project_id, const std::string& model_name) { return GCP_EMBEDDING_BASE_URL + project_id + GCP_EMBEDDING_PATH + model_name + GCP_EMBEDDING_PREDICT; } public: GCPEmbedder(const std::string& project_id, const std::string& model_name, const std::string& access_token, const std::string& refresh_token, const std::string& client_id, const std::string& client_secret); static Option<bool> is_model_valid(const nlohmann::json& model_config, size_t& num_dims); embedding_res_t Embed(const std::string& text, const size_t remote_embedder_timeout_ms = 30000, const size_t remote_embedding_num_tries = 2) override; std::vector<embedding_res_t> batch_embed(const std::vector<std::string>& inputs, const size_t 
remote_embedding_batch_size = 200, const size_t remote_embedding_timeout_ms = 60000, const size_t remote_embedding_num_tries = 2) override; nlohmann::json get_error_json(const nlohmann::json& req_body, long res_code, const std::string& res_body) override; static std::string get_model_key(const nlohmann::json& model_config); bool update_api_key(const std::string& api_key) override { return true; } };
7,296
C++
.h
109
57.431193
247
0.672669
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,861
app_metrics.h
typesense_typesense/include/app_metrics.h
#pragma once #include "sparsepp.h" #include "json.hpp" #include "logger.h" #include "tsconfig.h" #include <mutex> #include <string> #include <shared_mutex> #include <mutex> #include <fstream> class AppMetrics { private: mutable std::shared_mutex mutex; // stores last complete window spp::sparse_hash_map<std::string, uint64_t>* counts; spp::sparse_hash_map<std::string, uint64_t>* durations; // stores the current window spp::sparse_hash_map<std::string, uint64_t>* current_counts; spp::sparse_hash_map<std::string, uint64_t>* current_durations; std::string access_log_path; std::ofstream access_log; AppMetrics() { current_counts = new spp::sparse_hash_map<std::string, uint64_t>(); counts = new spp::sparse_hash_map<std::string, uint64_t>(); current_durations = new spp::sparse_hash_map<std::string, uint64_t>(); durations = new spp::sparse_hash_map<std::string, uint64_t>(); access_log_path = Config::get_instance().get_access_log_path(); if(Config::get_instance().get_enable_access_logging() && !access_log_path.empty()) { access_log.open(access_log_path, std::ofstream::out | std::ofstream::app); } } ~AppMetrics() { delete current_counts; delete counts; delete current_durations; delete durations; } public: static inline const std::string SEARCH_LABEL = "search"; static inline const std::string DOC_WRITE_LABEL = "write"; static inline const std::string IMPORT_LABEL = "import"; static inline const std::string DOC_DELETE_LABEL = "delete"; static inline const std::string OVERLOADED_LABEL = "overloaded"; static const uint64_t METRICS_REFRESH_INTERVAL_MS = 10 * 1000; static AppMetrics & get_instance() { static AppMetrics instance; return instance; } AppMetrics(AppMetrics const&) = delete; void operator=(AppMetrics const&) = delete; void increment_count(const std::string& identifier, uint64_t count) { std::unique_lock lock(mutex); (*current_counts)[identifier] += count; } void increment_duration(const std::string& identifier, uint64_t duration) { std::unique_lock lock(mutex); 
(*current_durations)[identifier] += duration; } void increment_write_metrics(uint64_t route_hash, uint64_t duration); void write_access_log(const uint64_t epoch_millis, const char* remote_ip, const std::string& path); void flush_access_log(); void window_reset(); void get(const std::string& rps_key, const std::string& latency_key, nlohmann::json &result) const; };
2,657
C++
.h
64
35.875
103
0.679238
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,862
index.h
typesense_typesense/include/index.h
#pragma once #include <string> #include <unordered_map> #include <vector> #include <mutex> #include <shared_mutex> #include <condition_variable> #include <art.h> #include <number.h> #include <sparsepp.h> #include <store.h> #include <topster.h> #include <json.hpp> #include <field.h> #include <option.h> #include <set> #include "string_utils.h" #include "num_tree.h" #include "magic_enum.hpp" #include "match_score.h" #include "posting_list.h" #include "threadpool.h" #include "adi_tree.h" #include "tsl/htrie_set.h" #include <tsl/htrie_map.h> #include "id_list.h" #include "synonym_index.h" #include "override.h" #include "vector_query_ops.h" #include "hnswlib/hnswlib.h" #include "filter.h" #include "facet_index.h" #include "numeric_range_trie.h" static constexpr size_t ARRAY_FACET_DIM = 4; using facet_map_t = spp::sparse_hash_map<uint32_t, facet_hash_values_t>; using single_val_facet_map_t = spp::sparse_hash_map<uint32_t, uint32_t>; using array_mapped_facet_t = std::array<facet_map_t*, ARRAY_FACET_DIM>; using array_mapped_single_val_facet_t = std::array<single_val_facet_map_t*, ARRAY_FACET_DIM>; static constexpr size_t ARRAY_INFIX_DIM = 4; using array_mapped_infix_t = std::vector<tsl::htrie_set<char>*>; struct token_t { size_t position; std::string value; bool is_prefix_searched; uint32_t root_len; // if prefix searched, length of the root token uint32_t num_typos; token_t() {}; token_t(size_t position, const std::string& value, bool is_prefix_searched, uint32_t root_len, uint32_t num_typos): position(position), value(value), is_prefix_searched(is_prefix_searched), root_len(root_len), num_typos(num_typos) { } }; // FIXME: deprecated struct token_candidates { token_t token; size_t cost; bool prefix_search; std::vector<art_leaf*> candidates; }; struct tok_candidates { token_t token; size_t cost; bool prefix_search; std::vector<std::string> candidates; }; struct query_tokens_t { std::vector<token_t> q_include_tokens; std::vector<token_t> q_unstemmed_tokens; 
std::vector<std::vector<std::string>> q_exclude_tokens; std::vector<std::vector<std::string>> q_phrases; std::vector<std::vector<std::string>> q_synonyms; }; enum enable_t { always, fallback, off }; struct search_field_t { std::string name; std::string str_name; // for lookup of non-string fields in art index size_t weight; size_t num_typos; bool prefix; enable_t infix; search_field_t(const std::string& name, const std::string& str_name, size_t weight, size_t num_typos, bool prefix, enable_t infix): name(name), str_name(str_name), weight(weight), num_typos(num_typos), prefix(prefix), infix(infix) { } }; enum text_match_type_t { max_score, sum_score, max_weight }; enum drop_tokens_mode_t { left_to_right, right_to_left, both_sides, }; struct drop_tokens_param_t { drop_tokens_mode_t mode = right_to_left; size_t token_limit = 1000; drop_tokens_param_t() { } drop_tokens_param_t(drop_tokens_mode_t mode, size_t token_limit) : mode(mode), token_limit(token_limit) {} }; struct search_args { std::vector<query_tokens_t> field_query_tokens; std::vector<search_field_t> search_fields; const text_match_type_t match_type; filter_node_t* filter_tree_root; std::vector<facet>& facets; std::vector<std::pair<uint32_t, uint32_t>>& included_ids; std::vector<uint32_t> excluded_ids; std::vector<sort_by>& sort_fields_std; facet_query_t facet_query; std::vector<uint32_t> num_typos; size_t max_facet_values; size_t fetch_size; size_t per_page; size_t offset; token_ordering token_order; std::vector<bool> prefixes; size_t drop_tokens_threshold; size_t typo_tokens_threshold; std::vector<std::string> group_by_fields; size_t group_limit; bool group_missing_values; std::string default_sorting_field; bool prioritize_exact_match; bool prioritize_token_position; bool prioritize_num_matching_fields; size_t all_result_ids_len; bool exhaustive_search; size_t concurrency; size_t search_cutoff_ms; size_t min_len_1typo; size_t min_len_2typo; size_t max_candidates; std::vector<enable_t> infixes; const size_t 
max_extra_prefix; const size_t max_extra_suffix; const size_t facet_query_num_typos; const bool filter_curated_hits; const enable_t split_join_tokens; tsl::htrie_map<char, token_leaf> qtoken_set; spp::sparse_hash_map<uint64_t, uint32_t> groups_processed; std::vector<std::vector<art_leaf*>> searched_queries; Topster* topster = nullptr; Topster* curated_topster = nullptr; std::vector<std::vector<KV*>> raw_result_kvs; std::vector<std::vector<KV*>> override_result_kvs; vector_query_t& vector_query; size_t facet_sample_percent; size_t facet_sample_threshold; drop_tokens_param_t drop_tokens_mode; bool enable_lazy_filter; size_t max_filter_by_candidates; search_args(std::vector<query_tokens_t> field_query_tokens, std::vector<search_field_t> search_fields, const text_match_type_t match_type, filter_node_t* filter_tree_root, std::vector<facet>& facets, std::vector<std::pair<uint32_t, uint32_t>>& included_ids, std::vector<uint32_t> excluded_ids, std::vector<sort_by>& sort_fields_std, facet_query_t facet_query, const std::vector<uint32_t>& num_typos, size_t max_facet_values, size_t fetch_size, size_t per_page, size_t offset, token_ordering token_order, const std::vector<bool>& prefixes, size_t drop_tokens_threshold, size_t typo_tokens_threshold, const std::vector<std::string>& group_by_fields, size_t group_limit, const bool group_missing_values, const string& default_sorting_field, bool prioritize_exact_match, const bool prioritize_token_position, const bool prioritize_num_matching_fields, bool exhaustive_search, size_t concurrency, size_t search_cutoff_ms, size_t min_len_1typo, size_t min_len_2typo, size_t max_candidates, const std::vector<enable_t>& infixes, const size_t max_extra_prefix, const size_t max_extra_suffix, const size_t facet_query_num_typos, const bool filter_curated_hits, const enable_t split_join_tokens, vector_query_t& vector_query, size_t facet_sample_percent, size_t facet_sample_threshold, drop_tokens_param_t drop_tokens_mode, bool enable_lazy_filter, 
const size_t max_filter_by_candidates) : field_query_tokens(field_query_tokens), search_fields(search_fields), match_type(match_type), filter_tree_root(filter_tree_root), facets(facets), included_ids(included_ids), excluded_ids(excluded_ids), sort_fields_std(sort_fields_std), facet_query(facet_query), num_typos(num_typos), max_facet_values(max_facet_values), fetch_size(fetch_size), per_page(per_page), offset(offset), token_order(token_order), prefixes(prefixes), drop_tokens_threshold(drop_tokens_threshold), typo_tokens_threshold(typo_tokens_threshold), group_by_fields(group_by_fields), group_limit(group_limit), group_missing_values(group_missing_values), default_sorting_field(default_sorting_field), prioritize_exact_match(prioritize_exact_match), prioritize_token_position(prioritize_token_position), prioritize_num_matching_fields(prioritize_num_matching_fields), all_result_ids_len(0), exhaustive_search(exhaustive_search), concurrency(concurrency), search_cutoff_ms(search_cutoff_ms), min_len_1typo(min_len_1typo), min_len_2typo(min_len_2typo), max_candidates(max_candidates), infixes(infixes), max_extra_prefix(max_extra_prefix), max_extra_suffix(max_extra_suffix), facet_query_num_typos(facet_query_num_typos), filter_curated_hits(filter_curated_hits), split_join_tokens(split_join_tokens), vector_query(vector_query), facet_sample_percent(facet_sample_percent), facet_sample_threshold(facet_sample_threshold), drop_tokens_mode(drop_tokens_mode), enable_lazy_filter(enable_lazy_filter), max_filter_by_candidates(max_filter_by_candidates) { } ~search_args() { delete topster; delete curated_topster; }; }; enum facet_index_type_t { exhaustive, top_values, automatic, }; struct offsets_facet_hashes_t { // token to offsets std::unordered_map<std::string, std::vector<uint32_t>> offsets; }; struct index_record { size_t position; // position of record in the original request uint32_t seq_id; nlohmann::json doc; // actual document sent in request (could be partial) nlohmann::json 
old_doc; // previously stored *full* document from disk nlohmann::json new_doc; // new *full* document to be stored into disk nlohmann::json del_doc; // document containing the fields that should be deleted nlohmann::json embedding_res; // embedding result int embedding_status_code; // embedding status code index_operation_t operation; bool is_update; // pre-processed data primed for indexing std::unordered_map<std::string, offsets_facet_hashes_t> field_index; int64_t points; Option<bool> indexed; // indicates if the indexing operation was a success DIRTY_VALUES dirty_values; index_record(size_t record_pos, uint32_t seq_id, const nlohmann::json& doc, index_operation_t operation, const DIRTY_VALUES& dirty_values): position(record_pos), seq_id(seq_id), doc(doc), operation(operation), is_update(false), indexed(false), dirty_values(dirty_values) { } index_record(index_record&& rhs) = default; index_record& operator=(index_record&& mE) = default; void index_failure(const uint32_t err_code, const std::string & err_msg) { indexed = Option<bool>(err_code, err_msg); } void index_success() { indexed = Option<bool>(true); } }; class VectorFilterFunctor: public hnswlib::BaseFilterFunctor { filter_result_iterator_t* const filter_result_iterator; const uint32_t* excluded_ids = nullptr; const uint32_t excluded_ids_length = 0; public: explicit VectorFilterFunctor(filter_result_iterator_t* const filter_result_iterator, const uint32_t* excluded_ids = nullptr, const uint32_t excluded_ids_length = 0) : filter_result_iterator(filter_result_iterator), excluded_ids(excluded_ids), excluded_ids_length(excluded_ids_length) {} bool operator()(hnswlib::labeltype id) override { if (filter_result_iterator->approx_filter_ids_length == 0 && excluded_ids_length == 0) { return true; } if(excluded_ids_length > 0 && excluded_ids && std::binary_search(excluded_ids, excluded_ids + excluded_ids_length, id)) { return false; } if(filter_result_iterator->approx_filter_ids_length == 0) { return true; } 
filter_result_iterator->reset(); return filter_result_iterator->is_valid(id) == 1; } }; struct hnsw_index_t { hnswlib::InnerProductSpace* space; hnswlib::HierarchicalNSW<float>* vecdex; size_t num_dim; vector_distance_type_t distance_type; // ensures that this index is not dropped when it's being repaired std::mutex repair_m; hnsw_index_t(size_t num_dim, size_t init_size, vector_distance_type_t distance_type, size_t M = 16, size_t ef_construction = 200) : space(new hnswlib::InnerProductSpace(num_dim)), vecdex(new hnswlib::HierarchicalNSW<float>(space, init_size, M, ef_construction, 100, true)), num_dim(num_dim), distance_type(distance_type) { } ~hnsw_index_t() { std::lock_guard lk(repair_m); delete vecdex; delete space; } // needed for cosine similarity static void normalize_vector(const std::vector<float>& src, std::vector<float>& norm_dest) { float norm = 0.0f; for (float i : src) { norm += i * i; } norm = 1.0f / (sqrtf(norm) + 1e-30f); for (size_t i = 0; i < src.size(); i++) { norm_dest[i] = src[i] * norm; } } }; struct group_by_field_it_t { std::string field_name; posting_list_t::iterator_t it; bool is_array; }; struct Hasher32 { // Helps to spread the hash key and is used for sort index. 
// see: https://github.com/greg7mdp/sparsepp/issues/21#issuecomment-270816275 size_t operator()(uint32_t k) const { return (k ^ 2166136261U) * 16777619UL; } }; struct pair_hash { template <class T1, class T2> std::size_t operator() (const std::pair<T1, T2> &pair) const { return std::hash<T1>()(pair.first) ^ std::hash<T2>()(pair.second); } }; class Index { private: mutable std::shared_mutex mutex; std::string name; const uint32_t collection_id; const Store* store; const SynonymIndex* synonym_index; ThreadPool* thread_pool; size_t num_documents; tsl::htrie_map<char, field> search_schema; spp::sparse_hash_map<std::string, art_tree*> search_index; spp::sparse_hash_map<std::string, num_tree_t*> numerical_index; // reference_helper_field => (seq_id => ref_seq_ids) // Only used when the reference field is an array type otherwise sort_index is used. spp::sparse_hash_map<std::string, num_tree_t*> reference_index; /// field_name => ((doc_id, object_index) => ref_doc_id) /// Used when a field inside an object array has reference. 
spp::sparse_hash_map<std::string, spp::sparse_hash_map<std::pair<uint32_t, uint32_t>, uint32_t, pair_hash>*> object_array_reference_index; spp::sparse_hash_map<std::string, NumericTrie*> range_index; spp::sparse_hash_map<std::string, NumericTrie*> geo_range_index; // geo_array_field => (seq_id => values) used for exact filtering of geo array records spp::sparse_hash_map<std::string, spp::sparse_hash_map<uint32_t, int64_t*>*> geo_array_index; facet_index_t* facet_index_v4 = nullptr; // sort_field => (seq_id => value) spp::sparse_hash_map<std::string, spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*> sort_index; typedef spp::sparse_hash_map<std::string, spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*>::iterator sort_index_iterator; // str_sort_field => adi_tree_t spp::sparse_hash_map<std::string, adi_tree_t*> str_sort_index; // infix field => value spp::sparse_hash_map<std::string, array_mapped_infix_t> infix_index; // vector field => vector index spp::sparse_hash_map<std::string, hnsw_index_t*> vector_index; // this is used for wildcard queries id_list_t* seq_ids; std::vector<char> symbols_to_index; std::vector<char> token_separators; StringUtils string_utils; // used as sentinels static spp::sparse_hash_map<uint32_t, int64_t, Hasher32> text_match_sentinel_value; static spp::sparse_hash_map<uint32_t, int64_t, Hasher32> seq_id_sentinel_value; static spp::sparse_hash_map<uint32_t, int64_t, Hasher32> eval_sentinel_value; static spp::sparse_hash_map<uint32_t, int64_t, Hasher32> geo_sentinel_value; static spp::sparse_hash_map<uint32_t, int64_t, Hasher32> str_sentinel_value; static spp::sparse_hash_map<uint32_t, int64_t, Hasher32> vector_distance_sentinel_value; static spp::sparse_hash_map<uint32_t, int64_t, Hasher32> vector_query_sentinel_value; // Internal utility functions static inline uint32_t next_suggestion2(const std::vector<tok_candidates>& token_candidates_vec, long long int n, std::vector<token_t>& query_suggestion, uint64& qhash); static inline uint32_t 
next_suggestion(const std::vector<token_candidates> &token_candidates_vec, long long int n, std::vector<art_leaf *>& actual_query_suggestion, std::vector<art_leaf *>& query_suggestion, int syn_orig_num_tokens, uint32_t& token_bits, uint64& qhash); void log_leaves(int cost, const std::string &token, const std::vector<art_leaf *> &leaves) const; void do_facets(std::vector<facet> & facets, facet_query_t & facet_query, bool estimate_facets, size_t facet_sample_percent, const std::vector<facet_info_t>& facet_infos, size_t group_limit, const std::vector<std::string>& group_by_fields, const bool group_missing_values, const uint32_t* result_ids, size_t results_size, int max_facet_count, bool is_wildcard_query, const std::vector<facet_index_type_t>& facet_index_types) const; bool static_filter_query_eval(const override_t* override, std::vector<std::string>& tokens, filter_node_t*& filter_tree_root) const; bool resolve_override(const std::vector<std::string>& rule_tokens, bool exact_rule_match, const std::vector<std::string>& query_tokens, token_ordering token_order, std::set<std::string>& absorbed_tokens, std::string& filter_by_clause, bool enable_typos_for_numerical_tokens, bool enable_typos_for_alpha_numerical_tokens) const; bool check_for_overrides(const token_ordering& token_order, const string& field_name, bool slide_window, bool exact_rule_match, std::vector<std::string>& tokens, std::set<std::string>& absorbed_tokens, std::vector<std::string>& field_absorbed_tokens, bool enable_typos_for_numerical_tokens, bool enable_typos_for_alpha_numerical_tokens) const; static void aggregate_topster(Topster* agg_topster, Topster* index_topster); Option<bool> search_all_candidates(const size_t num_search_fields, const text_match_type_t match_type, const std::vector<search_field_t>& the_fields, filter_result_iterator_t* const filter_result_iterator, const uint32_t* excluded_result_ids, size_t excluded_result_ids_size, const std::unordered_set<uint32_t>& excluded_group_ids, const 
std::vector<sort_by>& sort_fields, std::vector<tok_candidates>& token_candidates_vec, std::vector<std::vector<art_leaf*>>& searched_queries, tsl::htrie_map<char, token_leaf>& qtoken_set, const std::vector<token_t>& dropped_tokens, Topster*& topster, spp::sparse_hash_map<uint64_t, uint32_t>& groups_processed, size_t& num_keyword_matches, uint32_t*& all_result_ids, size_t& all_result_ids_len, const size_t typo_tokens_threshold, const size_t group_limit, const std::vector<std::string>& group_by_fields, const bool group_missing_values, const std::vector<token_t>& query_tokens, const std::vector<uint32_t>& num_typos, const std::vector<bool>& prefixes, bool prioritize_exact_match, const bool prioritize_token_position, const bool exhaustive_search, const bool prioritize_num_matching_fields, const size_t max_candidates, int syn_orig_num_tokens, const int* sort_order, std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3>& field_values, const std::vector<size_t>& geopoint_indices, std::set<uint64>& query_hashes, std::vector<uint32_t>& id_buff, const std::string& collection_name = "") const; static void popular_fields_of_token(const spp::sparse_hash_map<std::string, art_tree*>& search_index, const std::string& previous_token, const std::vector<search_field_t>& the_fields, const size_t num_search_fields, std::vector<size_t>& popular_field_ids); bool field_is_indexed(const std::string& field_name) const; static void tokenize_string(const std::string& text, const field& a_field, const std::vector<char>& symbols_to_index, const std::vector<char>& token_separators, std::unordered_map<std::string, std::vector<uint32_t>>& token_to_offsets); static void tokenize_string_array(const std::vector<std::string>& strings, const field& a_field, const std::vector<char>& symbols_to_index, const std::vector<char>& token_separators, std::unordered_map<std::string, std::vector<uint32_t>>& token_to_offsets); void collate_included_ids(const std::vector<token_t>& q_included_tokens, const 
std::map<size_t, std::map<size_t, uint32_t>> & included_ids_map, Topster*& curated_topster, std::vector<std::vector<art_leaf*>> & searched_queries) const; static void compute_facet_stats(facet &a_facet, const std::string& raw_value, const std::string & field_type, const size_t count); static void compute_facet_stats(facet &a_facet, const int64_t raw_value, const std::string & field_type); static void handle_doc_ops(const tsl::htrie_map<char, field>& search_schema, nlohmann::json& update_doc, const nlohmann::json& old_doc); static void get_doc_changes(const index_operation_t op, const tsl::htrie_map<char, field>& embedding_fields, nlohmann::json &update_doc, const nlohmann::json &old_doc, nlohmann::json &new_doc, nlohmann::json &del_doc); bool common_results_exist(std::vector<art_leaf*>& leaves, bool must_match_phrase) const; static void remove_facet_token(const field& search_field, spp::sparse_hash_map<std::string, art_tree*>& search_index, const std::string& token, uint32_t seq_id); void initialize_facet_indexes(const field& facet_field); std::vector<group_by_field_it_t> get_group_by_field_iterators(const std::vector<std::string>&, bool is_reverse=false) const; static void batch_embed_fields(std::vector<index_record*>& documents, const tsl::htrie_map<char, field>& embedding_fields, const tsl::htrie_map<char, field> & search_schema, const size_t remote_embedding_batch_size = 200, const size_t remote_embedding_timeout_ms = 60000, const size_t remote_embedding_num_tries = 2); static void process_embed_results(std::vector<std::pair<index_record*, std::string>>& values_to_embed, const index_record* record, const std::vector<embedding_res_t>& embedding_results, size_t& count, const field& the_field); void update_async_references(const std::string& collection_name, const field& afield, std::vector<index_record>& iter_batch, const std::set<reference_pair_t>& async_referenced_ins = {}); std::string get_collection_name_with_lock() const { std::shared_lock lock(mutex); 
return get_collection_name(); } std::string get_collection_name() const { // Index name is: collection_name + std::to_string(0) return name.empty() ? "" : name.substr(0, name.size() - 1); } Option<uint32_t> get_sort_index_value(const std::string& field_name, const uint32_t& seq_id) const; Option<int64_t> get_geo_distance(const std::string& geo_field_name, const uint32_t& seq_id, const S2LatLng& reference_lat_lng, const bool& round_distance = false) const; Option<uint32_t> get_ref_seq_id_helper(const sort_by& sort_field, const uint32_t& seq_id, std::string& prev_coll_name, std::map<std::string, reference_filter_result_t> const*& references, std::string& ref_coll_name) const; public: // for limiting number of results on multiple candidates / query rewrites enum {TYPO_TOKENS_THRESHOLD = 1}; // for limiting number of fields that can be searched on enum {FIELD_LIMIT_NUM = 100}; // Values 0 to 15 are allowed enum {FIELD_MAX_WEIGHT = 15}; enum {COMBINATION_MAX_LIMIT = 10000}; enum {COMBINATION_MIN_LIMIT = 10}; enum {NUM_CANDIDATES_DEFAULT_MIN = 4}; enum {NUM_CANDIDATES_DEFAULT_MAX = 10}; // If the number of results found is less than this threshold, Typesense will attempt to drop the tokens // in the query that have the least individual hits one by one until enough results are found. static const int DROP_TOKENS_THRESHOLD = 1; enum {DEFAULT_TOPSTER_SIZE = 250}; /// Value used when async_reference is true and a reference doc is not found. 
static constexpr int64_t reference_helper_sentinel_value = UINT32_MAX; Index() = delete; Index(const std::string& name, const uint32_t collection_id, const Store* store, SynonymIndex* synonym_index, ThreadPool* thread_pool, const tsl::htrie_map<char, field>& search_schema, const std::vector<char>& symbols_to_index, const std::vector<char>& token_separators); ~Index(); static void concat_topster_ids(Topster*& topster, spp::sparse_hash_map<uint64_t, std::vector<KV*>>& topster_ids); int64_t score_results2(const std::vector<sort_by> & sort_fields, const uint16_t & query_index, const size_t field_id, const bool field_is_array, const uint32_t total_cost, int64_t& match_score, const uint32_t seq_id, const int sort_order[3], const bool prioritize_exact_match, const bool single_exact_query_token, const bool prioritize_token_position, size_t num_query_tokens, int syn_orig_num_tokens, const std::vector<posting_list_t::iterator_t>& posting_lists) const; void score_results(const std::vector<sort_by> &sort_fields, const uint16_t &query_index, const uint8_t &field_id, bool field_is_array, const uint32_t total_cost, Topster*& topster, const std::vector<art_leaf *> &query_suggestion, spp::sparse_hash_map<uint64_t, uint32_t>& groups_processed, const uint32_t seq_id, const int sort_order[3], std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3> field_values, const std::vector<size_t>& geopoint_indices, const size_t group_limit, const std::vector<std::string> &group_by_fields, const bool group_missing_values, uint32_t token_bits, bool prioritize_exact_match, bool single_exact_query_token, int syn_orig_num_tokens, const std::vector<posting_list_t::iterator_t>& posting_lists) const; static int64_t get_points_from_doc(const nlohmann::json &document, const std::string & default_sorting_field); const spp::sparse_hash_map<std::string, art_tree *>& _get_search_index() const; const spp::sparse_hash_map<std::string, num_tree_t*>& _get_numerical_index() const; const 
spp::sparse_hash_map<std::string, NumericTrie*>& _get_range_index() const; const spp::sparse_hash_map<std::string, array_mapped_infix_t>& _get_infix_index() const; const spp::sparse_hash_map<std::string, hnsw_index_t*>& _get_vector_index() const; facet_index_t* _get_facet_index() const; static int get_bounded_typo_cost(const size_t max_cost, const std::string& token, const size_t token_len, size_t min_len_1typo, size_t min_len_2typo, bool enable_typos_for_numerical_tokens=true, bool enable_typos_for_alpha_numerical_tokens = true); static int64_t float_to_int64_t(float n); static float int64_t_to_float(int64_t n); void get_distinct_id(posting_list_t::iterator_t& facet_index_it, const uint32_t seq_id, const bool is_array, const bool group_missing_values, uint64_t& distinct_id, bool is_reverse=false) const; static void compute_token_offsets_facets(index_record& record, const tsl::htrie_map<char, field>& search_schema, const std::vector<char>& local_token_separators, const std::vector<char>& local_symbols_to_index); static void tokenize_string_field(const nlohmann::json& document, const field& search_field, std::vector<std::string>& tokens, const std::string& locale, const std::vector<char>& symbols_to_index, const std::vector<char>& token_separators); // Public operations Option<bool> run_search(search_args* search_params, const std::string& collection_name, const std::vector<facet_index_type_t>& facet_index_types, bool enable_typos_for_numerical_tokens, bool enable_synonyms, bool synonym_prefix, uint32_t synonym_num_typos, bool enable_typos_for_alpha_numerical_tokens, bool rerank_hybrid_matches); Option<bool> search(std::vector<query_tokens_t>& field_query_tokens, const std::vector<search_field_t>& the_fields, const text_match_type_t match_type, filter_node_t*& filter_tree_root, std::vector<facet>& facets, facet_query_t& facet_query, const int max_facet_values, const std::vector<std::pair<uint32_t, uint32_t>>& included_ids, const std::vector<uint32_t>& excluded_ids, 
std::vector<sort_by>& sort_fields_std, const std::vector<uint32_t>& num_typos, Topster*& topster, Topster*& curated_topster, const size_t fetch_size, const size_t per_page, const size_t offset, const token_ordering token_order, const std::vector<bool>& prefixes, const size_t drop_tokens_threshold, size_t& all_result_ids_len, spp::sparse_hash_map<uint64_t, uint32_t>& groups_processed, std::vector<std::vector<art_leaf*>>& searched_queries, tsl::htrie_map<char, token_leaf>& qtoken_set, std::vector<std::vector<KV*>>& raw_result_kvs, std::vector<std::vector<KV*>>& override_result_kvs, const size_t typo_tokens_threshold, const size_t group_limit, const std::vector<std::string>& group_by_fields, const bool group_missing_values, const string& default_sorting_field, bool prioritize_exact_match, const bool prioritize_token_position, const bool prioritize_num_matching_fields, bool exhaustive_search, size_t concurrency, size_t search_cutoff_ms, size_t min_len_1typo, size_t min_len_2typo, size_t max_candidates, const std::vector<enable_t>& infixes, const size_t max_extra_prefix, const size_t max_extra_suffix, const size_t facet_query_num_typos, const bool filter_curated_hits, enable_t split_join_tokens, const vector_query_t& vector_query, size_t facet_sample_percent, size_t facet_sample_threshold, const std::string& collection_name, const drop_tokens_param_t drop_tokens_mode, const std::vector<facet_index_type_t>& facet_index_types, bool enable_typos_for_numerical_tokens = true, bool enable_synonyms = true, bool synonym_prefix = false, uint32_t synonym_num_typos = 0, bool enable_lazy_filter = false, bool enable_typos_for_alpha_numerical_tokens = true, const size_t& max_filter_by_candidates = DEFAULT_FILTER_BY_CANDIDATES, bool rerank_hybrid_matches = false ) const; void remove_field(uint32_t seq_id, nlohmann::json& document, const std::string& field_name, const bool is_update); Option<uint32_t> remove(const uint32_t seq_id, nlohmann::json & document, const std::vector<field>& 
del_fields, const bool is_update); static void validate_and_preprocess(Index *index, std::vector<index_record>& iter_batch, const size_t batch_start_index, const size_t batch_size, const std::string & default_sorting_field, const tsl::htrie_map<char, field> & search_schema, const tsl::htrie_map<char, field> & embedding_fields, const std::string& fallback_field_type, const std::vector<char>& token_separators, const std::vector<char>& symbols_to_index, const bool do_validation, const size_t remote_embedding_batch_size = 200, const size_t remote_embedding_timeout_ms = 60000, const size_t remote_embedding_num_tries = 2, const bool generate_embeddings = true); static size_t batch_memory_index(Index *index, std::vector<index_record>& iter_batch, const std::string& default_sorting_field, const tsl::htrie_map<char, field>& search_schema, const tsl::htrie_map<char, field> & embedding_fields, const std::string& fallback_field_type, const std::vector<char>& token_separators, const std::vector<char>& symbols_to_index, const bool do_validation, const size_t remote_embedding_batch_size = 200, const size_t remote_embedding_timeout_ms = 60000, const size_t remote_embedding_num_tries = 2, const bool generate_embeddings = true, const bool use_addition_fields = false, const tsl::htrie_map<char, field>& addition_fields = tsl::htrie_map<char, field>(), const std::string& collection_name = "", const spp::sparse_hash_map<std::string, std::set<reference_pair_t>>& async_referenced_ins = spp::sparse_hash_map<std::string, std::set<reference_pair_t>>()); void index_field_in_memory(const std::string& collection_name, const field& afield, std::vector<index_record>& iter_batch, const std::set<reference_pair_t>& async_referenced_ins = {}); template<class T> void iterate_and_index_numerical_field(std::vector<index_record>& iter_batch, const field& afield, T func); //static bool is_point_in_polygon(const Geofence& poly, const GeoCoord& point); //static double transform_for_180th_meridian(Geofence& 
poly); //static void transform_for_180th_meridian(GeoCoord& point, double offset); art_leaf* get_token_leaf(const std::string & field_name, const unsigned char* token, uint32_t token_len); Option<bool> do_filtering_with_lock(filter_node_t* const filter_tree_root, filter_result_t& filter_result, const std::string& collection_name = "", const bool& should_timeout = true) const; Option<bool> do_reference_filtering_with_lock(filter_node_t* const filter_tree_root, filter_result_t& filter_result, const std::string& ref_collection_name, const std::string& field_name) const; Option<filter_result_t> do_filtering_with_reference_ids(const std::string& field_name, const std::string& ref_collection_name, filter_result_t&& ref_filter_result) const; void refresh_schemas(const std::vector<field>& new_fields, const std::vector<field>& del_fields); // the following methods are not synchronized because their parent calls are synchronized or they are const/static Option<bool> search_wildcard(filter_node_t const* const& filter_tree_root, const std::vector<sort_by>& sort_fields, Topster*& topster, Topster*& curated_topster, spp::sparse_hash_map<uint64_t, uint32_t>& groups_processed, std::vector<std::vector<art_leaf*>>& searched_queries, const size_t group_limit, const std::vector<std::string>& group_by_fields, const bool group_missing_values, const uint32_t* exclude_token_ids, size_t exclude_token_ids_size, const std::unordered_set<uint32_t>& excluded_group_ids, uint32_t*& all_result_ids, size_t& all_result_ids_len, filter_result_iterator_t* const filter_result_iterator, const size_t concurrency, const int* sort_order, std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3>& field_values, const std::vector<size_t>& geopoint_indices, const std::string& collection_name = "") const; Option<bool> search_infix(const std::string& query, const std::string& field_name, std::vector<uint32_t>& ids, size_t max_extra_prefix, size_t max_extra_suffix) const; void curate_filtered_ids(const 
uint32_t* exclude_token_ids, size_t exclude_token_ids_size, uint32_t*& filter_ids, uint32_t& filter_ids_length, const std::vector<uint32_t>& curated_ids_sorted) const; Option<bool> populate_sort_mapping(int* sort_order, std::vector<size_t>& geopoint_indices, std::vector<sort_by>& sort_fields_std, std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3>& field_values) const; Option<bool> populate_sort_mapping_with_lock(int* sort_order, std::vector<size_t>& geopoint_indices, std::vector<sort_by>& sort_fields_std, std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3>& field_values) const; int64_t reference_string_sort_score(const std::string& field_name, const uint32_t& seq_id) const; static void remove_matched_tokens(std::vector<std::string>& tokens, const std::set<std::string>& rule_token_set) ; void compute_facet_infos(const std::vector<facet>& facets, facet_query_t& facet_query, const uint32_t facet_query_num_typos, uint32_t* all_result_ids, const size_t& all_result_ids_len, const std::vector<std::string>& group_by_fields, size_t group_limit, bool is_wildcard_no_filter_query, size_t max_candidates, std::vector<facet_info_t>& facet_infos, const std::vector<facet_index_type_t>& facet_index_types ) const; void resolve_space_as_typos(std::vector<std::string>& qtokens, const std::string& field_name, std::vector<std::vector<std::string>>& resolved_queries) const; size_t num_seq_ids() const; void handle_exclusion(const size_t num_search_fields, std::vector<query_tokens_t>& field_query_tokens, const std::vector<search_field_t>& search_fields, uint32_t*& exclude_token_ids, size_t& exclude_token_ids_size) const; Option<bool> do_infix_search(const size_t num_search_fields, const std::vector<search_field_t>& the_fields, const std::vector<enable_t>& infixes, const std::vector<sort_by>& sort_fields, std::vector<std::vector<art_leaf*>>& searched_queries, const size_t group_limit, const std::vector<std::string>& group_by_fields, const bool 
group_missing_values, const size_t max_extra_prefix, const size_t max_extra_suffix, const std::vector<token_t>& query_tokens, Topster* actual_topster, filter_result_iterator_t* const filter_result_iterator, const int sort_order[3], std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3> field_values, const std::vector<size_t>& geopoint_indices, const std::vector<uint32_t>& curated_ids_sorted, const std::unordered_set<uint32_t>& excluded_group_ids, uint32_t*& all_result_ids, size_t& all_result_ids_len, spp::sparse_hash_map<uint64_t, uint32_t>& groups_processed, const std::string& collection_name = "") const; [[nodiscard]] Option<bool> do_synonym_search(const std::vector<search_field_t>& the_fields, const text_match_type_t match_type, filter_node_t const* const& filter_tree_root, const std::vector<sort_by>& sort_fields_std, Topster*& curated_topster, const token_ordering& token_order, const size_t typo_tokens_threshold, const size_t group_limit, const std::vector<std::string>& group_by_fields, const bool group_missing_values, bool prioritize_exact_match, const bool prioritize_token_position, const bool prioritize_num_matching_fields, const bool exhaustive_search, const size_t concurrency, const std::vector<bool>& prefixes, size_t min_len_1typo, size_t min_len_2typo, const size_t max_candidates, const std::set<uint32_t>& curated_ids, const std::vector<uint32_t>& curated_ids_sorted, const uint32_t* exclude_token_ids, size_t exclude_token_ids_size, const std::unordered_set<uint32_t>& excluded_group_ids, Topster* actual_topster, std::vector<std::vector<token_t>>& q_pos_synonyms, int syn_orig_num_tokens, spp::sparse_hash_map<uint64_t, uint32_t>& groups_processed, std::vector<std::vector<art_leaf*>>& searched_queries, uint32_t*& all_result_ids, size_t& all_result_ids_len, filter_result_iterator_t* const filter_result_iterator, std::set<uint64>& query_hashes, const int* sort_order, std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3>& field_values, 
const std::vector<size_t>& geopoint_indices, tsl::htrie_map<char, token_leaf>& qtoken_set, const std::string& collection_name = "") const; Option<bool> do_phrase_search(const size_t num_search_fields, const std::vector<search_field_t>& search_fields, std::vector<query_tokens_t>& field_query_tokens, const std::vector<sort_by>& sort_fields, std::vector<std::vector<art_leaf*>>& searched_queries, const size_t group_limit, const std::vector<std::string>& group_by_fields, const bool group_missing_values, Topster* actual_topster, const int sort_order[3], std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3> field_values, const std::vector<size_t>& geopoint_indices, filter_result_iterator_t*& filter_result_iterator, uint32_t*& all_result_ids, size_t& all_result_ids_len, spp::sparse_hash_map<uint64_t, uint32_t>& groups_processed, const uint32_t* excluded_result_ids, size_t excluded_result_ids_size, const std::unordered_set<uint32_t>& excluded_group_ids, bool is_wildcard_query, const std::string& collection_name = "") const; [[nodiscard]] Option<bool> fuzzy_search_fields(const std::vector<search_field_t>& the_fields, const std::vector<token_t>& query_tokens, const std::vector<token_t>& dropped_tokens, const text_match_type_t match_type, const uint32_t* excluded_result_ids, size_t excluded_result_ids_size, filter_result_iterator_t* const filter_result_iterator, const std::vector<uint32_t>& curated_ids, const std::unordered_set<uint32_t>& excluded_group_ids, const std::vector<sort_by>& sort_fields, const std::vector<uint32_t>& num_typos, std::vector<std::vector<art_leaf*>>& searched_queries, tsl::htrie_map<char, token_leaf>& qtoken_set, Topster*& topster, spp::sparse_hash_map<uint64_t, uint32_t>& groups_processed, uint32_t*& all_result_ids, size_t& all_result_ids_len, const size_t group_limit, const std::vector<std::string>& group_by_fields, const bool group_missing_values, bool prioritize_exact_match, const bool prioritize_token_position, const bool 
prioritize_num_matching_fields, std::set<uint64>& query_hashes, const token_ordering token_order, const std::vector<bool>& prefixes, const size_t typo_tokens_threshold, const bool exhaustive_search, const size_t max_candidates, size_t min_len_1typo, size_t min_len_2typo, int syn_orig_num_tokens, const int* sort_order, std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3>& field_values, const std::vector<size_t>& geopoint_indices, const std::string& collection_name = "", bool enable_typos_for_numerical_tokens = true, bool enable_typos_for_alpha_numerical_tokens = true) const; void find_across_fields(const token_t& previous_token, const std::string& previous_token_str, const std::vector<search_field_t>& the_fields, const size_t num_search_fields, filter_result_iterator_t* const filter_result_iterator, const uint32_t* exclude_token_ids, size_t exclude_token_ids_size, std::vector<uint32_t>& prev_token_doc_ids, std::vector<size_t>& top_prefix_field_ids) const; Option<bool> search_across_fields(const std::vector<token_t>& query_tokens, const std::vector<uint32_t>& num_typos, const std::vector<bool>& prefixes, const std::vector<search_field_t>& the_fields, const size_t num_search_fields, const text_match_type_t match_type, const std::vector<sort_by>& sort_fields, Topster*& topster, spp::sparse_hash_map<uint64_t, uint32_t>& groups_processed, std::vector<std::vector<art_leaf*>>& searched_queries, tsl::htrie_map<char, token_leaf>& qtoken_set, const std::vector<token_t>& dropped_tokens, const size_t group_limit, const std::vector<std::string>& group_by_fields, const bool group_missing_values, bool prioritize_exact_match, const bool search_all_candidates, const bool prioritize_num_matching_fields, filter_result_iterator_t* const filter_result_iterator, const uint32_t total_cost, const int syn_orig_num_tokens, const uint32_t* excluded_result_ids, size_t excluded_result_ids_size, const std::unordered_set<uint32_t>& excluded_group_ids, const int* sort_order, 
std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3>& field_values, const std::vector<size_t>& geopoint_indices, std::vector<uint32_t>& id_buff, size_t& num_keyword_matches, uint32_t*& all_result_ids, size_t& all_result_ids_len, const std::string& collection_name = "") const; void process_filter_overrides(const std::vector<const override_t*>& filter_overrides, std::vector<std::string>& query_tokens, token_ordering token_order, filter_node_t*& filter_tree_root, std::vector<const override_t*>& matched_dynamic_overrides, nlohmann::json& override_metadata, bool enable_typos_for_numerical_tokens, bool enable_typos_for_alpha_numerical_tokens) const; Option<bool> compute_sort_scores(const std::vector<sort_by>& sort_fields, const int* sort_order, std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3> field_values, const std::vector<size_t>& geopoint_indices, uint32_t seq_id, const std::map<basic_string<char>, reference_filter_result_t>& references, std::vector<uint32_t>& filter_indexes, int64_t max_field_match_score, int64_t* scores, int64_t& match_score_index, bool& should_skip, float vector_distance = 0, const std::string& collection_name = "") const; void process_curated_ids(const std::vector<std::pair<uint32_t, uint32_t>>& included_ids, const std::vector<uint32_t>& excluded_ids, const std::vector<std::string>& group_by_fields, const size_t group_limit, const bool group_missing_values, const bool filter_curated_hits, filter_result_iterator_t* const filter_result_iterator, std::set<uint32_t>& curated_ids, std::map<size_t, std::map<size_t, uint32_t>>& included_ids_map, std::vector<uint32_t>& included_ids_vec, std::unordered_set<uint32_t>& excluded_group_ids) const; int64_t get_doc_val_from_sort_index(sort_index_iterator it, uint32_t doc_seq_id) const; Option<bool> seq_ids_outside_top_k(const std::string& field_name, size_t k, std::vector<uint32_t>& outside_seq_ids); Option<bool> get_related_ids(const std::string& field_name, const uint32_t& seq_id, 
std::vector<uint32_t>& result) const; Option<bool> get_object_array_related_id(const std::string& collection_name, const std::string& reference_helper_field_name, const uint32_t& seq_id, const uint32_t& object_index, uint32_t& result) const; Option<uint32_t> get_sort_index_value_with_lock(const std::string& field_name, const uint32_t& seq_id) const; friend class filter_result_iterator_t; void repair_hnsw_index(); void aggregate_facet(const size_t group_limit, facet& this_facet, facet& acc_facet) const; Option<int64_t> get_geo_distance_with_lock(const std::string& geo_field_name, const uint32_t& seq_id, const S2LatLng& reference_lat_lng, const bool& round_distance = false) const; Option<int64_t> get_referenced_geo_distance(const sort_by& sort_field, uint32_t seq_id, const std::map<basic_string<char>, reference_filter_result_t>& references, const S2LatLng& reference_lat_lng, const bool& round_distance = false) const; Option<uint32_t> get_ref_seq_id(const sort_by& sort_field, const uint32_t& seq_id, const std::map<std::string, reference_filter_result_t>& references, std::string& ref_collection_name) const; void get_top_k_result_ids(const std::vector<std::vector<KV*>>& raw_result_kvs, std::vector<uint32_t>& result_ids) const; void compute_aux_scores(Topster *topster, const std::vector<search_field_t>& the_fields, const std::vector<token_t>& query_tokens, uint16_t search_query_size, const std::vector<sort_by>& sort_fields_std, const int* sort_order, const vector_query_t& vector_query) const; float compute_decay_function_score(const sort_by& sort_field, uint32_t seq_id) const; }; template<class T> void Index::iterate_and_index_numerical_field(std::vector<index_record>& iter_batch, const field& afield, T func) { for(auto& record: iter_batch) { if(!record.indexed.ok()) { continue; } const auto& document = record.doc; const auto seq_id = record.seq_id; if (document.count(afield.name) == 0 || !afield.index) { continue; } try { func(record, seq_id); } catch(const 
std::exception &e) { LOG(INFO) << "Error while indexing numerical field." << e.what(); record.index_failure(400, e.what()); } } }
60,638
C++
.h
906
47.357616
176
0.552795
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,863
string_utils.h
typesense_typesense/include/string_utils.h
#pragma once #include <string> #include <algorithm> #include <sstream> #include <ctype.h> #include <vector> #include <random> #include <map> #include <queue> #include "wyhash_v5.h" #include <unicode/normalizer2.h> #include <set> #include "option.h" struct StringUtils { // non-deletable singleton const icu::Normalizer2* nfkd; StringUtils(); ~StringUtils(); static size_t split_facet(const std::string& s, std::vector<std::string> & result, const bool keep_empty = false, const size_t start_index = 0, const size_t max_values = (std::numeric_limits<size_t>::max()-1)); // Adapted from: http://stackoverflow.com/a/236180/131050 static size_t split(const std::string& s, std::vector<std::string> & result, const std::string& delim, const bool keep_empty = false, const bool trim_space = true, const size_t start_index = 0, const size_t max_values = std::numeric_limits<size_t>::max()) { if (delim.empty()) { result.push_back(s); return s.size(); } std::string::const_iterator substart = s.begin()+start_index, subend; size_t end_index = start_index; while (true) { subend = std::search(substart, s.end(), delim.begin(), delim.end()); std::string temp(substart, subend); end_index += temp.size() + delim.size(); if(trim_space) { temp = trim(temp); } if (keep_empty || !temp.empty()) { result.push_back(temp); } if(result.size() == max_values) { break; } if (subend == s.end()) { break; } substart = subend + delim.size(); } return std::min(end_index, s.size()); } static std::string join(std::vector<std::string> vec, const std::string& delimiter, size_t start_index = 0) { std::stringstream ss; for(size_t i = start_index; i < vec.size(); i++) { if(i != start_index) { ss << delimiter; } ss << vec[i]; } return ss.str(); } static void split_to_values(const std::string& vals_str, std::vector<std::string>& filter_values); // Adapted from: http://stackoverflow.com/a/36000453/131050 static std::string & trim(std::string & str) { // right trim while (str.length () > 0 && (str [str.length ()-1] == ' 
')) { str.erase (str.length ()-1, 1); } // left trim while (str.length () > 0 && (str [0] == ' ')) { str.erase (0, 1); } return str; } // URL decoding - adapted from: http://stackoverflow.com/a/32595923/131050 static char from_hex(char ch) { return isdigit(ch) ? ch - '0' : tolower(ch) - 'a' + 10; } // Convert string of chars to its representative string of hex numbers static std::string str2hex(const std::string& str, bool capital = false); static std::string url_decode(const std::string& text) { char h; std::ostringstream escaped; escaped.fill('0'); for (auto i = text.begin(), n = text.end(); i != n; ++i) { std::string::value_type c = (*i); if (c == '%') { if (i[1] && i[2]) { h = from_hex(i[1]) << 4 | from_hex(i[2]); escaped << h; i += 2; } } else if (c == '+') { escaped << ' '; } else { escaped << c; } } return escaped.str(); } static bool is_float(const std::string &s) { try { size_t num_chars_processed = 0; std::stof(s, &num_chars_processed); return num_chars_processed == s.size(); } catch(...) 
{ return false; } } // Adapted from: http://stackoverflow.com/a/2845275/131050 static bool is_integer(const std::string &s) { if(s.empty() || ((!isdigit(s[0])) && (s[0] != '-') && (s[0] != '+'))) { return false; } char * p ; strtol(s.c_str(), &p, 10); return (*p == 0); } static bool is_positive_integer(const std::string& s) { return !s.empty() && s.find_first_not_of("0123456789") == std::string::npos; } // Adapted from: http://stackoverflow.com/a/2845275/131050 static bool is_uint64_t(const std::string &s) { if(s.empty()) { return false; } char * p ; unsigned long long ull = strtoull(s.c_str(), &p, 10); return (*p == 0) && ull <= std::numeric_limits<uint64_t>::max(); } static bool is_int64_t(const std::string &s) { if(s.empty()) { return false; } char * p ; long long val = strtoll(s.c_str(), &p, 10); return (*p == 0) && val >= std::numeric_limits<int64_t>::min() && val <= std::numeric_limits<int64_t>::max(); } static bool is_uint32_t(const std::string &s) { if(s.empty()) { return false; } char * p ; unsigned long ul = strtoul(s.c_str(), &p, 10); return (*p == 0) && ul <= std::numeric_limits<uint32_t>::max(); } static bool is_int32_t(const std::string &s) { if(s.empty()) { return false; } char * p ; long val = strtol(s.c_str(), &p, 10); return (*p == 0) && val >= std::numeric_limits<int32_t>::min() && val <= std::numeric_limits<int32_t>::max(); } static bool is_bool(std::string &s) { if(s.empty()) { return false; } StringUtils::tolowercase(s); return s == "true" || s == "false"; } static void toupper(std::string& str) { std::transform(str.begin(), str.end(), str.begin(), ::toupper); } static void tolowercase(std::string& str) { std::transform(str.begin(), str.end(), str.begin(), ::tolower); } /* https://stackoverflow.com/a/34571089/131050 */ static std::string base64_encode(const std::string &in) { std::string out; int val=0, valb=-6; for(unsigned char c : in) { val = (val<<8) + c; valb += 8; while (valb>=0) { 
out.push_back("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"[(val>>valb)&0x3F]); valb-=6; } } if(valb>-6) { out.push_back("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"[((val<<8)>>(valb+8))&0x3F]); } while(out.size()%4) { out.push_back('='); } return out; } static std::string base64_decode(const std::string &in) { std::string out; std::vector<int> T(256,-1); for(int i=0; i<64; i++) { T["ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"[i]] = i; } int val=0, valb=-8; for(unsigned char c : in) { if (T[c] == -1) break; val = (val<<6) + T[c]; valb += 6; if (valb>=0) { out.push_back(char((val>>valb)&0xFF)); valb-=8; } } return out; } static std::string serialize_uint32_t(uint32_t num) { unsigned char bytes[4]; bytes[0] = (unsigned char) ((num >> 24) & 0xFF); bytes[1] = (unsigned char) ((num >> 16) & 0xFF); bytes[2] = (unsigned char) ((num >> 8) & 0xFF); bytes[3] = (unsigned char) ((num & 0xFF)); return std::string(bytes, bytes+4); } static std::string serialize_uint64_t(uint64_t num) { unsigned char bytes[8]; bytes[0] = (unsigned char) ((num >> 56) & 0xFF); bytes[1] = (unsigned char) ((num >> 48) & 0xFF); bytes[2] = (unsigned char) ((num >> 40) & 0xFF); bytes[3] = (unsigned char) ((num >> 32) & 0xFF); bytes[4] = (unsigned char) ((num >> 24) & 0xFF); bytes[5] = (unsigned char) ((num >> 16) & 0xFF); bytes[6] = (unsigned char) ((num >> 8) & 0xFF); bytes[7] = (unsigned char) ((num & 0xFF)); return std::string(bytes, bytes+8); } static uint32_t deserialize_uint32_t(std::string serialized_num) { uint32_t seq_id = ((serialized_num[0] & 0xFF) << 24) | ((serialized_num[1] & 0xFF) << 16) | ((serialized_num[2] & 0xFF) << 8) | (serialized_num[3] & 0xFF); return seq_id; } static uint64_t hash_wy(const void* key, uint64_t len) { uint64_t hash = wyhash(key, len, 0, _wyp); // reserve max() for use as a delimiter return hash != std::numeric_limits<uint64_t>::max() ? 
hash : (std::numeric_limits<uint64_t>::max()-1); } // reference: https://stackoverflow.com/a/27952689/131050 static uint64_t hash_combine(uint64_t combined, uint64_t hash) { combined ^= hash + 0x517cc1b727220a95 + (combined << 6) + (combined >> 2); return combined; } std::string unicode_nfkd(const std::string& text); static std::string randstring(size_t length); static std::string hmac(const std::string& key, const std::string& msg); static std::string hash_sha256(const std::string& str); //static size_t unicode_length(const std::string& bytes); static bool begins_with(const std::string& str, const std::string& prefix) { return str.rfind(prefix, 0) == 0; } static void parse_query_string(const std::string& query, std::map<std::string, std::string>& query_map); static std::string float_to_str(float value); static void replace_all(std::string& subject, const std::string& search, const std::string& replace); static void erase_char(std::string& str, const char c); static std::string trim_curly_spaces(const std::string& str); static bool ends_with(std::string const &str, std::string const &ending); static bool contains_word(const std::string& haystack, const std::string& needle); static char* get_ip_str(const struct sockaddr* sa, char* s, size_t maxlen); static size_t get_num_chars(const std::string& text); static Option<bool> tokenize_filter_query(const std::string& filter_query, std::queue<std::string>& tokens); static Option<bool> split_include_exclude_fields(const std::string& include_exclude_fields, std::vector<std::string>& tokens); static size_t get_occurence_count(const std::string& str, char symbol); };
10,762
C++
.h
264
31.280303
121
0.539488
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,864
filter_result_iterator.h
typesense_typesense/include/filter_result_iterator.h
#pragma once #include <string> #include <map> #include <utility> #include <vector> #include <memory> #include "num_tree.h" #include "option.h" #include "posting_list.h" #include "id_list.h" class Index; struct filter_node_t; struct reference_filter_result_t { uint32_t count = 0; uint32_t* docs = nullptr; bool is_reference_array_field = true; // In case of nested join, references can further have references. std::map<std::string, reference_filter_result_t>* coll_to_references = nullptr; explicit reference_filter_result_t(uint32_t count = 0, uint32_t* docs = nullptr, bool is_reference_array_field = true) : count(count), docs(docs), is_reference_array_field(is_reference_array_field) {} reference_filter_result_t(const reference_filter_result_t& obj) { if (&obj == this) { return; } count = obj.count; docs = new uint32_t[count]; memcpy(docs, obj.docs, count * sizeof(uint32_t)); is_reference_array_field = obj.is_reference_array_field; copy_references(obj, *this); } reference_filter_result_t& operator=(const reference_filter_result_t& obj) noexcept { if (&obj == this) { return *this; } count = obj.count; docs = new uint32_t[count]; memcpy(docs, obj.docs, count * sizeof(uint32_t)); is_reference_array_field = obj.is_reference_array_field; copy_references(obj, *this); return *this; } reference_filter_result_t& operator=(reference_filter_result_t&& obj) noexcept { if (&obj == this) { return *this; } count = obj.count; docs = obj.docs; coll_to_references = obj.coll_to_references; is_reference_array_field = obj.is_reference_array_field; // Set default values in obj. 
obj.count = 0; obj.docs = nullptr; obj.coll_to_references = nullptr; obj.is_reference_array_field = true; return *this; } ~reference_filter_result_t() { delete[] docs; delete[] coll_to_references; } static void copy_references(const reference_filter_result_t& from, reference_filter_result_t& to); }; struct single_filter_result_t { uint32_t seq_id = 0; // Collection name -> Reference filter result std::map<std::string, reference_filter_result_t> reference_filter_results = {}; bool is_reference_array_field = true; single_filter_result_t() = default; single_filter_result_t(uint32_t seq_id, std::map<std::string, reference_filter_result_t>&& reference_filter_results, bool is_reference_array_field = true) : seq_id(seq_id), reference_filter_results(std::move(reference_filter_results)), is_reference_array_field(is_reference_array_field) {} single_filter_result_t(const single_filter_result_t& obj) { if (&obj == this) { return; } seq_id = obj.seq_id; is_reference_array_field = obj.is_reference_array_field; // Copy every collection's reference. for (const auto &item: obj.reference_filter_results) { auto& ref_coll_name = item.first; reference_filter_results[ref_coll_name] = item.second; } } single_filter_result_t(single_filter_result_t&& obj) { if (&obj == this) { return; } seq_id = obj.seq_id; is_reference_array_field = obj.is_reference_array_field; reference_filter_results = std::move(obj.reference_filter_results); } single_filter_result_t& operator=(const single_filter_result_t& obj) noexcept { if (&obj == this) { return *this; } seq_id = obj.seq_id; is_reference_array_field = obj.is_reference_array_field; // Copy every collection's reference. 
for (const auto &item: obj.reference_filter_results) { auto& ref_coll_name = item.first; reference_filter_results[ref_coll_name] = item.second; } return *this; } single_filter_result_t& operator=(single_filter_result_t&& obj) noexcept { if (&obj == this) { return *this; } seq_id = obj.seq_id; is_reference_array_field = obj.is_reference_array_field; reference_filter_results = std::move(obj.reference_filter_results); return *this; } }; struct filter_result_t { uint32_t count = 0; uint32_t* docs = nullptr; // Collection name -> Reference filter result std::map<std::string, reference_filter_result_t>* coll_to_references = nullptr; filter_result_t() = default; filter_result_t(uint32_t count, uint32_t* docs) : count(count), docs(docs) {} filter_result_t(const filter_result_t& obj) { if (&obj == this) { return; } count = obj.count; docs = new uint32_t[count]; memcpy(docs, obj.docs, count * sizeof(uint32_t)); copy_references(obj, *this); } filter_result_t& operator=(const filter_result_t& obj) noexcept { if (&obj == this) { return *this; } count = obj.count; docs = new uint32_t[count]; memcpy(docs, obj.docs, count * sizeof(uint32_t)); copy_references(obj, *this); return *this; } filter_result_t& operator=(filter_result_t&& obj) noexcept { if (&obj == this) { return *this; } count = obj.count; docs = obj.docs; coll_to_references = obj.coll_to_references; // Set default values in obj. 
obj.count = 0; obj.docs = nullptr; obj.coll_to_references = nullptr; return *this; } ~filter_result_t() { delete[] docs; delete[] coll_to_references; } static void and_filter_results(const filter_result_t& a, const filter_result_t& b, filter_result_t& result); static void or_filter_results(const filter_result_t& a, const filter_result_t& b, filter_result_t& result); static void copy_references(const filter_result_t& from, filter_result_t& to); }; #ifdef TEST_BUILD constexpr uint16_t function_call_modulo = 10; constexpr uint16_t string_filter_ids_threshold = 3; constexpr uint16_t bool_filter_ids_threshold = 3; constexpr uint16_t numeric_filter_ids_threshold = 3; #else constexpr uint16_t function_call_modulo = 16'384; constexpr uint16_t string_filter_ids_threshold = 20'000; constexpr uint16_t bool_filter_ids_threshold = 20'000; constexpr uint16_t numeric_filter_ids_threshold = 20'000; #endif struct filter_result_iterator_timeout_info { filter_result_iterator_timeout_info(uint64_t search_begin_us, uint64_t search_stop_us); filter_result_iterator_timeout_info(const filter_result_iterator_timeout_info& obj) { function_call_counter = obj.function_call_counter; search_begin_us = obj.search_begin_us; search_stop_us = obj.search_stop_us; } uint16_t function_call_counter = 0; uint64_t search_begin_us = 0; uint64_t search_stop_us = UINT64_MAX; }; class filter_result_iterator_t { private: std::string collection_name; const Index* index = nullptr; const filter_node_t* filter_node = nullptr; filter_result_iterator_t* left_it = nullptr; filter_result_iterator_t* right_it = nullptr; /// Used in case of id and reference filter. uint32_t result_index = 0; /// Initialized in case of `id: *` filter. id_list_t::iterator_t all_seq_ids_iterator = id_list_t::iterator_t(nullptr, nullptr, nullptr, false); /// Stores the result of the filters that cannot be iterated. filter_result_t filter_result; bool is_filter_result_initialized = false; /// Initialized in case of filter on string field. 
/// Sample filter values: ["foo bar", "baz"]. Each filter value is split into tokens. We get posting list iterator /// for each token. /// /// Multiple filter values: Multiple tokens: posting list iterator std::vector<std::vector<posting_list_t*>> posting_lists; std::vector<std::vector<posting_list_t::iterator_t>> posting_list_iterators; std::vector<posting_list_t*> expanded_plists; /// Controls the number of similar words that Typesense considers during fuzzy search for filter_by values. size_t max_filter_by_candidates; bool is_not_equals_iterator = false; uint32_t equals_iterator_id = 0; bool is_equals_iterator_valid = true; uint32_t last_valid_id = 0; /// Used in case of a single boolean filter matching more than `bool_filter_ids_threshold` ids. num_tree_t::iterator_t bool_iterator = num_tree_t::iterator_t(nullptr, NUM_COMPARATOR::EQUALS, 0); /// Initialized in case of filter on a numeric field. /// Sample filter: [10..100, 150]. Operators other than `=` and `!` can match more than one values. We get id list /// iterator for each value. /// /// Multiple filters: Multiple values: id list iterator std::vector<std::vector<id_list_t*>> id_lists; std::vector<std::vector<id_list_t::iterator_t>> id_list_iterators; std::vector<id_list_t*> expanded_id_lists; /// Stores the the current seq_id of filter values. std::vector<uint32_t> seq_ids; /// Numerical filters can have `!` operator individually. /// Sample filter: [>10, !15]. std::unordered_set<uint32_t> numerical_not_iterator_index; /// String filter can specify prefix value match. /// Sample filter: [Chris P*]. std::unordered_set<uint32_t> string_prefix_filter_index; bool delete_filter_node = false; std::unique_ptr<filter_result_iterator_timeout_info> timeout_info; /// Initializes the state of iterator node after it's creation. void init(const bool& enable_lazy_evaluation = false); /// Performs AND on the subtrees of operator. void and_filter_iterators(); /// Performs OR on the subtrees of operator. 
void or_filter_iterators(); /// Advances all the token iterators that are at seq_id. void advance_string_filter_token_iterators(); /// Finds the next match for a filter on string field. void get_string_filter_next_match(const bool& field_is_array); /// Advances all the iterators that are at seq_id. void advance_numeric_filter_iterators(); /// Computes the match for a filter on numeric field. void get_numeric_filter_match(const bool init = false); explicit filter_result_iterator_t(uint32_t approx_filter_ids_length); /// Collects n doc ids while advancing the iterator. The iterator may become invalid during this operation. /// **The references are moved from filter_result_iterator_t. void get_n_ids(const uint32_t& n, filter_result_t*& result, const bool& override_timeout = false); /// Updates `validity` of the iterator to `timed_out` if condition is met. Assumes `timeout_info` is not null. inline bool is_timed_out(const bool& override_function_call_counter = false); /// Advances the iterator until the doc value reaches or just overshoots id. The iterator may become invalid during /// this operation. void skip_to(uint32_t id); public: uint32_t seq_id = 0; /// Collection name -> references std::map<std::string, reference_filter_result_t> reference; /// In case of a complex filter query, validity of a node is dependent on it's sub-nodes. enum {timed_out = -1, invalid, valid} validity = valid; /// Initialization status of the iterator. Option<bool> status = Option(true); /// Holds the upper-bound of the number of seq ids this iterator would match. /// Useful in a scenario where we need to differentiate between filter iterator not matching any document v/s filter /// iterator reaching it's end. 
(is_valid would be false in both these cases) uint32_t approx_filter_ids_length = 0; filter_result_iterator_t() = default; explicit filter_result_iterator_t(uint32_t* ids, const uint32_t& ids_count, const size_t& max_candidates = DEFAULT_FILTER_BY_CANDIDATES, uint64_t search_begin_us = 0, uint64_t search_stop_us = UINT64_MAX); explicit filter_result_iterator_t(const std::string& collection_name, Index const* const index, filter_node_t const* const filter_node, const bool& enable_lazy_evaluation = false, const size_t& max_candidates = DEFAULT_FILTER_BY_CANDIDATES, uint64_t search_begin_us = 0, uint64_t search_stop_us = UINT64_MAX); ~filter_result_iterator_t(); filter_result_iterator_t& operator=(filter_result_iterator_t&& obj) noexcept; /// Returns the status of the initialization of iterator tree. Option<bool> init_status(); /// Recursively computes the result of each node and stores the final result in the root node. void compute_iterators(); /// Handles moving the individual iterators to id internally and checks if `id` matches the filter. /// /// \return /// 0 : id is not valid /// 1 : id is valid /// -1: end of iterator / timed out [[nodiscard]] int is_valid(uint32_t id, const bool& override_timeout = false); /// Advances the iterator to get the next value of doc and reference. The iterator may become invalid during this /// operation. /// /// Should only be called after calling `compute_iterators()` or in conjunction with `is_valid(id)` when it returns `1`. void next(); /// Collects n doc ids while advancing the iterator. The ids present in excluded_result_ids are ignored. The /// iterator may become invalid during this operation. **The references are moved from filter_result_iterator_t. 
void get_n_ids(const uint32_t& n, uint32_t& excluded_result_index, uint32_t const* const excluded_result_ids, const size_t& excluded_result_ids_size, filter_result_t*& result, const bool& override_timeout = false); /// Returns true if at least one id from the posting list object matches the filter. bool contains_atleast_one(const void* obj); /// Returns to the initial state of the iterator. void reset(const bool& override_timeout = false); /// Copies filter ids from `filter_result` into `filter_array`. /// /// Should only be called after calling `compute_iterators()`. /// /// \return size of the filter array uint32_t to_filter_id_array(uint32_t*& filter_array); /// Performs AND with the contents of A and allocates a new array of results. /// \return size of the results array uint32_t and_scalar(const uint32_t* A, const uint32_t& lenA, uint32_t*& results); void and_scalar(const uint32_t* A, const uint32_t& lenA, filter_result_t& result); static void add_phrase_ids(filter_result_iterator_t*& filter_result_iterator, uint32_t* phrase_result_ids, const uint32_t& phrase_result_count); [[nodiscard]] bool _get_is_filter_result_initialized() const { return is_filter_result_initialized; } [[nodiscard]] filter_result_iterator_t* _get_left_it() const { return left_it; } [[nodiscard]] filter_result_iterator_t* _get_right_it() const { return right_it; } [[nodiscard]] uint32_t _get_equals_iterator_id() const { return equals_iterator_id; } [[nodiscard]] bool _get_is_equals_iterator_valid() const { return is_equals_iterator_valid; } inline bool is_filter_provided() const { return filter_node != nullptr; } };
15,904
C++
.h
336
39.705357
124
0.65313
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,865
numeric_range_trie_test.h
typesense_typesense/include/numeric_range_trie_test.h
#pragma once #include <map> #include "sorted_array.h" constexpr short EXPANSE = 256; class NumericTrie { char max_level = 4; class Node { Node** children = nullptr; sorted_array seq_ids; void insert_helper(const int64_t& value, const uint32_t& seq_id, char& level, const char& max_level); void insert_geopoint_helper(const uint64_t& cell_id, const uint32_t& seq_id, char& level, const char& max_level); void search_geopoints_helper(const uint64_t& cell_id, const char& max_index_level, std::set<Node*>& matches); void search_range_helper(const int64_t& low,const int64_t& high, const char& max_level, std::vector<Node*>& matches); void search_less_than_helper(const int64_t& value, char& level, const char& max_level, std::vector<Node*>& matches); void search_greater_than_helper(const int64_t& value, char& level, const char& max_level, std::vector<Node*>& matches); public: ~Node() { if (children != nullptr) { for (auto i = 0; i < EXPANSE; i++) { delete children[i]; } } delete [] children; } void insert(const int64_t& cell_id, const uint32_t& seq_id, const char& max_level); void remove(const int64_t& cell_id, const uint32_t& seq_id, const char& max_level); void insert_geopoint(const uint64_t& cell_id, const uint32_t& seq_id, const char& max_level); void search_geopoints(const std::vector<uint64_t>& cell_ids, const char& max_level, std::vector<uint32_t>& geo_result_ids); void delete_geopoint(const uint64_t& cell_id, uint32_t id, const char& max_level); void get_all_ids(uint32_t*& ids, uint32_t& ids_length); void search_range(const int64_t& low, const int64_t& high, const char& max_level, uint32_t*& ids, uint32_t& ids_length); void search_range(const int64_t& low, const int64_t& high, const char& max_level, std::vector<Node*>& matches); void search_less_than(const int64_t& value, const char& max_level, uint32_t*& ids, uint32_t& ids_length); void search_less_than(const int64_t& value, const char& max_level, std::vector<Node*>& matches); void search_greater_than(const int64_t& 
value, const char& max_level, uint32_t*& ids, uint32_t& ids_length); void search_greater_than(const int64_t& value, const char& max_level, std::vector<Node*>& matches); void search_equal_to(const int64_t& value, const char& max_level, uint32_t*& ids, uint32_t& ids_length); void search_equal_to(const int64_t& value, const char& max_level, std::vector<Node*>& matches); }; Node* negative_trie = nullptr; Node* positive_trie = nullptr; public: explicit NumericTrie(char num_bits = 32) { max_level = num_bits / 8; } ~NumericTrie() { delete negative_trie; delete positive_trie; } class iterator_t { struct match_state { uint32_t* ids = nullptr; uint32_t ids_length = 0; uint32_t index = 0; explicit match_state(uint32_t*& ids, uint32_t& ids_length) : ids(ids), ids_length(ids_length) {} ~match_state() { delete [] ids; } }; std::vector<match_state*> matches; void set_seq_id(); public: explicit iterator_t(std::vector<Node*>& matches); ~iterator_t() { for (auto& match: matches) { delete match; } } iterator_t& operator=(iterator_t&& obj) noexcept; uint32_t seq_id = 0; bool is_valid = true; void next(); void skip_to(uint32_t id); void reset(); }; void insert(const int64_t& value, const uint32_t& seq_id); void remove(const int64_t& value, const uint32_t& seq_id); void insert_geopoint(const uint64_t& cell_id, const uint32_t& seq_id); void search_geopoints(const std::vector<uint64_t>& cell_ids, std::vector<uint32_t>& geo_result_ids); void delete_geopoint(const uint64_t& cell_id, uint32_t id); void search_range(const int64_t& low, const bool& low_inclusive, const int64_t& high, const bool& high_inclusive, uint32_t*& ids, uint32_t& ids_length); iterator_t search_range(const int64_t& low, const bool& low_inclusive, const int64_t& high, const bool& high_inclusive); void search_less_than(const int64_t& value, const bool& inclusive, uint32_t*& ids, uint32_t& ids_length); iterator_t search_less_than(const int64_t& value, const bool& inclusive); void search_greater_than(const int64_t& value, 
const bool& inclusive, uint32_t*& ids, uint32_t& ids_length); iterator_t search_greater_than(const int64_t& value, const bool& inclusive); void search_equal_to(const int64_t& value, uint32_t*& ids, uint32_t& ids_length); iterator_t search_equal_to(const int64_t& value); };
5,256
C++
.h
99
42.111111
121
0.604665
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,866
image_embedder.h
typesense_typesense/include/image_embedder.h
#pragma once #include <memory> #include <vector> #include <mutex> #include <core/session/onnxruntime_cxx_api.h> #include "image_processor.h" #include "text_embedder_remote.h" enum class ImageEmbedderType { clip }; class ImageEmbedder { public: virtual embedding_res_t embed(const std::string& image_encoded) = 0; virtual std::vector<embedding_res_t> batch_embed(const std::vector<std::string>& inputs) = 0; virtual ~ImageEmbedder() = default; virtual ImageEmbedderType get_image_embedder_type() = 0; }; class CLIPImageEmbedder : public ImageEmbedder { private: // use shared session with text embedder std::shared_ptr<Ort::Session> session_; std::shared_ptr<Ort::Env> env_; std::mutex mutex_; CLIPImageProcessor image_processor_; public: CLIPImageEmbedder(const std::shared_ptr<Ort::Session>& session, const std::shared_ptr<Ort::Env>& env, const std::string& model_path); embedding_res_t embed(const std::string& image_encoded) override; std::vector<embedding_res_t> batch_embed(const std::vector<std::string>& inputs) override; virtual ImageEmbedderType get_image_embedder_type() override { return ImageEmbedderType::clip; } };
1,280
C++
.h
32
34.3125
141
0.691874
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,867
auth_manager.h
typesense_typesense/include/auth_manager.h
#pragma once #include <string> #include <vector> #include <map> #include <mutex> #include <shared_mutex> #include <tsl/htrie_map.h> #include "json.hpp" #include "option.h" #include "store.h" struct api_key_t { uint32_t id; std::string value; std::string description; std::vector<std::string> actions; std::vector<std::string> collections; uint64_t expires_at; bool autodelete; static constexpr const size_t PREFIX_LEN = 4; static constexpr const uint64_t FAR_FUTURE_TIMESTAMP = 64723363199; // year 4020 api_key_t() { } api_key_t(const std::string &value, const std::string &description, const std::vector<std::string> &actions, const std::vector<std::string> &collections, uint64_t expires_at, bool autodel=false) : value(value), description(description), actions(actions), collections(collections), expires_at(expires_at), autodelete(autodel) { } Option<bool> load(const std::string & json_str) { nlohmann::json key_obj; try { key_obj = nlohmann::json::parse(json_str); } catch(...) { return Option<bool>(500, "Error while parsing JSON string."); } id = key_obj["id"]; value = key_obj["value"]; description = key_obj["description"].get<std::string>(); actions = key_obj["actions"].get<std::vector<std::string>>(); collections = key_obj["collections"].get<std::vector<std::string>>(); // handle optional fields if(key_obj.count("expires_at") != 0) { expires_at = key_obj["expires_at"]; } else { expires_at = FAR_FUTURE_TIMESTAMP; } if(key_obj.count("autodelete") != 0) { autodelete = key_obj["autodelete"].get<bool>(); } else { autodelete = false; } return Option<bool>(true); } static Option<uint32_t> validate(const nlohmann::json & key_obj); nlohmann::json to_json() const { nlohmann::json obj; obj["id"] = id; obj["value"] = value; obj["description"] = description; obj["actions"] = actions; obj["collections"] = collections; obj["expires_at"] = expires_at; obj["autodelete"] = autodelete; return obj; } api_key_t& truncate_value() { value = value.substr(0, PREFIX_LEN); // return only first 4 
chars return (*this); } }; struct collection_key_t { std::string collection; std::string api_key; explicit collection_key_t(const std::string& collection, const std::string& api_key): collection(collection), api_key(api_key) { } }; class AuthManager { private: mutable std::shared_mutex mutex; tsl::htrie_map<char, api_key_t> api_keys; // stores key_value => key mapping Store *store; std::string bootstrap_auth_key; // Auto incrementing API KEY ID std::atomic<uint32_t> next_api_key_id; // Using a $ prefix so that these meta keys stay above record entries in a lexicographically ordered KV store static constexpr const char* API_KEY_NEXT_ID_KEY = "$KN"; static constexpr const char* API_KEYS_PREFIX = "$KP"; uint32_t get_next_api_key_id(); static constexpr const char* DOCUMENTS_SEARCH_ACTION = "documents:search"; static std::string fmt_error(std::string&& error, const std::string& key); Option<bool> authenticate_parse_params(const collection_key_t& scoped_api_key, const std::string& action, nlohmann::json& embedded_params) const ; bool auth_against_key(const std::string& req_collection, const std::string& action, const api_key_t &api_key, const bool search_only) const; static bool regexp_match(const std::string& value, const std::string& regexp); void remove_expired_keys(); public: static const size_t GENERATED_KEY_LEN = 32; static const size_t HMAC_BASE64_LEN = 44; AuthManager() = default; Option<bool> init(Store* store, const std::string& bootstrap_auth_key); Option<std::vector<api_key_t>> list_keys() const; Option<api_key_t> get_key(uint32_t id, bool truncate_value = true) const; Option<api_key_t> create_key(api_key_t& api_key); Option<api_key_t> remove_key(uint32_t id); bool authenticate(const std::string& action, const std::vector<collection_key_t>& collection_keys, std::map<std::string, std::string>& params, std::vector<nlohmann::json>& embedded_params_vec) const; static bool add_item_to_params(std::map<std::string, std::string> &req_params, const 
nlohmann::detail::iteration_proxy_value<nlohmann::json::iterator>& item, bool overwrite); void do_housekeeping(); std::vector<std::string> get_api_key_collections(const std::string& value); };
4,979
C++
.h
115
35.304348
119
0.628139
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,868
override.h
typesense_typesense/include/override.h
#pragma once #include <string> #include <json.hpp> #include "option.h" struct override_t { static const std::string MATCH_EXACT; static const std::string MATCH_CONTAINS; struct rule_t { std::string query; std::string normalized_query; // not actually stored, used for lowercasing etc. std::string match; bool dynamic_query = false; std::string filter_by; std::set<std::string> tags; }; struct add_hit_t { std::string doc_id; uint32_t position = 0; }; struct drop_hit_t { std::string doc_id; }; std::string id; rule_t rule; std::vector<add_hit_t> add_hits; std::vector<drop_hit_t> drop_hits; std::string filter_by; bool remove_matched_tokens = false; bool filter_curated_hits = false; bool stop_processing = true; std::string sort_by; std::string replace_query; nlohmann::json metadata; // epoch seconds int64_t effective_from_ts = -1; int64_t effective_to_ts = -1; override_t() = default; static Option<bool> parse(const nlohmann::json& override_json, const std::string& id, override_t& override, const std::string& locale = "", const std::vector<char>& symbols_to_index = {}, const std::vector<char>& token_separators = {} ); nlohmann::json to_json() const; };
1,504
C++
.h
45
24.955556
93
0.578547
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,869
batched_indexer.h
typesense_typesense/include/batched_indexer.h
#pragma once #include <unordered_map> #include <deque> #include "store.h" #include "http_data.h" #include "threadpool.h" #include "http_server.h" #include "tsconfig.h" class BatchedIndexer { private: struct req_res_t { uint64_t start_ts; std::string prev_req_body; // used to handle partial JSON documents caused by chunking std::shared_ptr<http_req> req; std::shared_ptr<http_res> res; uint64_t last_updated; uint32_t num_chunks; uint32_t next_chunk_index; // index where next read must begin bool is_complete; // whether the req has been written to store fully req_res_t(uint64_t start_ts, const std::string& prev_req_body, const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res, uint64_t last_updated, uint32_t num_chunks, uint32_t next_chunk_index, bool is_complete): start_ts(start_ts), prev_req_body(prev_req_body), req(req), res(res), last_updated(last_updated), num_chunks(num_chunks), next_chunk_index(next_chunk_index), is_complete(is_complete) { } req_res_t(): req(nullptr), res(nullptr), last_updated(0), num_chunks(0), next_chunk_index(0), is_complete(false) {}; }; struct await_t { std::mutex mcv; std::condition_variable cv; }; struct refq_entry { uint64_t queue_id; uint64_t start_ts; refq_entry(uint64_t qid, uint64_t sts): queue_id(qid), start_ts(sts) { } }; HttpServer* server; Store* store; Store* meta_store; const size_t num_threads; await_t* qmutuxes; std::vector<std::deque<uint64_t>> queues; std::unordered_map<std::string, std::unordered_set<std::string>> coll_to_references; await_t refq_wait; std::list<refq_entry> reference_q; /* Variables to be serialized on snapshot / --------------------------------------------------------- */ std::mutex mutex; std::map<uint64_t, req_res_t> req_res_map; std::atomic<int64_t> queued_writes = 0; /* ------------------------------------------------------- */ std::chrono::high_resolution_clock::time_point last_gc_run; std::atomic<bool> quit; std::shared_mutex pause_mutex; // Used to skip over a bad raft log 
entry which previously triggered a crash const static int64_t UNSET_SKIP_INDEX = -9999; std::atomic<int64_t> skip_index = UNSET_SKIP_INDEX; rocksdb::Iterator* skip_index_iter = nullptr; static constexpr const char* SKIP_INDICES_PREFIX = "$XP"; std::string skip_index_upper_bound_key = std::string(SKIP_INDICES_PREFIX) + "`"; // cannot inline this rocksdb::Slice* skip_index_iter_upper_bound = nullptr; // When set, all writes (both live and log serialized) are skipped with 422 response const std::atomic<bool>& skip_writes; const Config& config; static const size_t GC_INTERVAL_SECONDS = 60; static const size_t GC_PRUNE_MAX_SECONDS = 3600; static std::string get_req_prefix_key(uint64_t req_id); static std::string get_req_suffix_key(uint64_t req_id); public: static const constexpr char* RAFT_REQ_LOG_PREFIX = "$RL_"; BatchedIndexer(HttpServer* server, Store* store, Store* meta_store, size_t num_threads, const Config& config, const std::atomic<bool>& skip_writes); ~BatchedIndexer(); void enqueue(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res); int64_t get_queued_writes(); void run(); void stop(); void populate_skip_index(); void persist_applying_index(); void clear_skip_indices(); // requires external synchronization! void serialize_state(nlohmann::json& state); void load_state(const nlohmann::json& state); std::string get_collection_name(const std::shared_ptr<http_req>& req); std::shared_mutex& get_pause_mutex(); };
3,985
C++
.h
88
38.625
113
0.637095
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,870
option.h
typesense_typesense/include/option.h
#pragma once #include <stdint.h> #include <string> template <typename T=uint32_t> class Option { private: T value; bool is_ok; std::string error_msg; uint32_t error_code{}; public: explicit Option() = delete; explicit Option(const T & value): value(value), is_ok(true) { } Option(const uint32_t code, const std::string & error_msg): is_ok(false), error_msg(error_msg), error_code(code) { } Option(const Option &obj) { value = obj.value; is_ok = obj.is_ok; error_msg = obj.error_msg; error_code = obj.error_code; } Option& operator=(Option&& obj) noexcept { if (&obj == this) return *this; value = obj.value; is_ok = obj.is_ok; error_msg = obj.error_msg; error_code = obj.error_code; return *this; } bool ok() const { return is_ok; } T get() const { return value; } std::string error() const { return error_msg; } uint32_t code() const { return error_code; } };
1,086
C++
.h
44
18.568182
118
0.57561
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,871
cached_resource_stat.h
typesense_typesense/include/cached_resource_stat.h
#pragma once #include <cstdint> #include <atomic> #include <mutex> #include <chrono> #include <string> #include <sys/statvfs.h> class cached_resource_stat_t { public: enum resource_check_t { OK, OUT_OF_DISK, OUT_OF_MEMORY }; private: const static size_t REFRESH_INTERVAL_SECS = 5; std::atomic<uint64_t> last_checked_ts = 0; std::mutex m; resource_check_t resource_status = OK; cached_resource_stat_t() = default; ~cached_resource_stat_t() = default; static cached_resource_stat_t::resource_check_t get_resource_status(const std::string& data_dir_path, const int disk_used_max_percentage, const int memory_used_max_percentage); public: static cached_resource_stat_t& get_instance() { static cached_resource_stat_t instance; return instance; } // On Mac, we will only check for disk usage resource_check_t has_enough_resources(const std::string& data_dir_path, const int disk_used_max_percentage, const int memory_used_max_percentage); };
1,251
C++
.h
34
26.558824
105
0.583954
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,872
exectime.h
typesense_typesense/include/exectime.h
#pragma once #include <iostream> #include <chrono> #include "logger.h" class ExecTime { inline static std::chrono::time_point<std::chrono::high_resolution_clock> begin; public: static void start() { begin = std::chrono::high_resolution_clock::now(); } static void log(std::string operation) { long long int timeMicros = std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::high_resolution_clock::now() - begin).count(); LOG(INFO) << "Time taken for " << operation << ": " << timeMicros << "us"; } };
576
C++
.h
16
31.1875
89
0.644007
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,873
embedder_manager.h
typesense_typesense/include/embedder_manager.h
#pragma once #include <memory> #include <filesystem> #include <mutex> #include <unordered_map> #include <openssl/md5.h> #include <fstream> #include "logger.h" #include "http_client.h" #include "option.h" #include "text_embedder.h" #include "image_embedder.h" struct text_embedding_model { std::string model_name; std::string model_md5; std::string vocab_file_name; std::string vocab_md5; std::string data_file_md5; std::string tokenizer_md5; std::string tokenizer_file_name; std::string image_processor_md5; std::string image_processor_file_name; TokenizerType tokenizer_type; std::string indexing_prefix = ""; std::string query_prefix = ""; bool has_image_embedder = false; text_embedding_model(const nlohmann::json& json); text_embedding_model() = default; }; // Singleton class class EmbedderManager { public: static EmbedderManager& get_instance(); EmbedderManager(EmbedderManager&&) = delete; EmbedderManager& operator=(EmbedderManager&&) = delete; EmbedderManager(const EmbedderManager&) = delete; EmbedderManager& operator=(const EmbedderManager&) = delete; Option<TextEmbedder*> get_text_embedder(const nlohmann::json& model_config); Option<ImageEmbedder*> get_image_embedder(const nlohmann::json& model_config); void delete_text_embedder(const std::string& model_path); void delete_all_text_embedders(); void delete_image_embedder(const std::string& model_path); void delete_all_image_embedders(); static const TokenizerType get_tokenizer_type(const nlohmann::json& model_config); const std::string get_indexing_prefix(const nlohmann::json& model_config); const std::string get_query_prefix(const nlohmann::json& model_config); static void set_model_dir(const std::string& dir); static const std::string& get_model_dir(); ~EmbedderManager(); inline static const std::string MODELS_REPO_URL = "https://models.typesense.org/public/"; inline static const std::string MODEL_CONFIG_FILE = "config.json"; inline static std::string model_dir = ""; static const std::string 
get_absolute_model_path(const std::string& model_name); static const std::string get_absolute_vocab_path(const std::string& model_name, const std::string& vocab_file_name); static const std::string get_absolute_config_path(const std::string& model_name); static const std::string get_model_url(const text_embedding_model& model); static const std::string get_model_data_url(const text_embedding_model& model); static const std::string get_vocab_url(const text_embedding_model& model); static Option<nlohmann::json> get_public_model_config(const std::string& model_name); static const std::string get_model_name_without_namespace(const std::string& model_name); static const std::string get_model_namespace(const std::string& model_name); static const std::string get_model_subdir(const std::string& model_name); static const bool check_md5(const std::string& file_path, const std::string& target_md5); Option<bool> download_public_model(const text_embedding_model& model); Option<bool> init_public_model(const std::string& model_name); bool is_public_model(const std::string& model_name); static bool is_remote_model(const std::string& model_name); Option<bool> validate_and_init_remote_model(const nlohmann::json& model_config, size_t& num_dims); Option<bool> validate_and_init_local_model(const nlohmann::json& model_config, size_t& num_dims); Option<bool> validate_and_init_model(const nlohmann::json& model_config, size_t& num_dims); Option<bool> update_remote_model_apikey(const nlohmann::json& model_config, const std::string& new_apikey); std::unordered_map<std::string, std::shared_ptr<TextEmbedder>> _get_text_embedders() { return text_embedders; } private: EmbedderManager() = default; std::unordered_map<std::string, std::shared_ptr<TextEmbedder>> text_embedders; std::unordered_map<std::string, std::shared_ptr<ImageEmbedder>> image_embedders; std::unordered_map<std::string, text_embedding_model> public_models; std::mutex text_embedders_mutex, image_embedders_mutex; static 
Option<std::string> get_namespace(const std::string& model_name); };
4,313
C++
.h
82
48.170732
120
0.737756
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,874
http_client.h
typesense_typesense/include/http_client.h
#pragma once #include <string> #include <map> #include <curl/curl.h> #include "http_data.h" #include "http_server.h" /* NOTE: This is a really primitive blocking client meant only for specific Typesense use cases. */ class HttpClient { private: static std::string api_key; static std::string ca_cert_path; HttpClient() = default; ~HttpClient() = default; static size_t curl_write(char *contents, size_t size, size_t nmemb, std::string *s); static size_t curl_write_async(char *buffer, size_t size, size_t nmemb, void* context); static size_t curl_write_stream(char *buffer, size_t size, size_t nmemb, void *context); static size_t curl_write_async_done(void* context, curl_socket_t item); static size_t curl_write_stream_done(void* context, curl_socket_t item); static size_t curl_write_download(void *ptr, size_t size, size_t nmemb, FILE *stream); static CURL* init_curl(const std::string& url, std::string& response, const size_t timeout_ms = 0); static CURL* init_curl_async(const std::string& url, deferred_req_res_t* req_res, curl_slist*& chunk, bool send_ts_api_header); static CURL* init_curl_stream(const std::string& url, async_stream_response_t& res, long timeout_ms); static size_t curl_req_send_callback(char* buffer, size_t size, size_t nitems, void *userdata); static long perform_curl(CURL *curl, std::map<std::string, std::string>& res_headers, struct curl_slist *chunk = nullptr, bool send_ts_api_header = false); public: static HttpClient & get_instance() { static HttpClient instance; return instance; } HttpClient(HttpClient const&) = delete; void operator=(HttpClient const&) = delete; void init(const std::string & api_key); static long download_file(const std::string& url, const std::string& file_path); static long get_response(const std::string& url, std::string& response, std::map<std::string, std::string>& res_headers, const std::unordered_map<std::string, std::string>& headers = {}, long timeout_ms=4000, bool send_ts_api_header = false); static long 
delete_response(const std::string& url, std::string& response, std::map<std::string, std::string>& res_headers, long timeout_ms=4000, bool send_ts_api_header = false); static long post_response(const std::string & url, const std::string & body, std::string & response, std::map<std::string, std::string>& res_headers, const std::unordered_map<std::string, std::string>& headers = {}, long timeout_ms=4000, bool send_ts_api_header = false); static long post_response_async(const std::string &url, const std::shared_ptr<http_req> request, const std::shared_ptr<http_res> response, HttpServer* server, bool send_ts_api_header = false); static long post_response_stream(const std::string &url, const std::string &body, async_stream_response_t &response, std::map<std::string, std::string>& res_headers, const std::unordered_map<std::string, std::string>& headers, long timeout_ms=4000); static long put_response(const std::string & url, const std::string & body, std::string & response, std::map<std::string, std::string>& res_headers, long timeout_ms=4000, bool send_ts_api_header = false); static long patch_response(const std::string & url, const std::string & body, std::string & response, std::map<std::string, std::string>& res_headers, long timeout_ms=4000, bool send_ts_api_header = false); static void extract_response_headers(CURL* curl, std::map<std::string, std::string> &res_headers); };
4,232
C++
.h
66
50.212121
120
0.601304
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,875
posting.h
typesense_typesense/include/posting.h
#pragma once #include <cstdint> #include <vector> #include <or_iterator.h> #include "posting_list.h" #include "threadpool.h" #define IS_COMPACT_POSTING(x) (((uintptr_t)(x) & 1)) #define SET_COMPACT_POSTING(x) ((void*)((uintptr_t)(x) | 1)) #define RAW_POSTING_PTR(x) ((void*)((uintptr_t)(x) & ~1)) #define COMPACT_POSTING_PTR(x) ((compact_posting_list_t*)((uintptr_t)(x) & ~1)) struct compact_posting_list_t { // structured to get 4 byte alignment for `id_offsets` uint8_t length = 0; uint8_t ids_length = 0; uint16_t capacity = 0; // format: num_offsets, offset1,..,offsetn, id1 | num_offsets, offset1,..,offsetn, id2 uint32_t id_offsets[]; static compact_posting_list_t* create(uint32_t num_ids, const uint32_t* ids, const uint32_t* offset_index, uint32_t num_offsets, const uint32_t* offsets); [[nodiscard]] posting_list_t* to_full_posting_list() const; bool contains(uint32_t id); int64_t upsert(uint32_t id, const std::vector<uint32_t>& offsets); int64_t upsert(uint32_t id, const uint32_t* offsets, uint32_t num_offsets); void erase(uint32_t id); uint32_t first_id(); uint32_t last_id(); [[nodiscard]] uint32_t num_ids() const; bool contains_atleast_one(const uint32_t* target_ids, size_t target_ids_size); }; class posting_t { public: static constexpr size_t COMPACT_LIST_THRESHOLD_LENGTH = 64; static constexpr size_t MAX_BLOCK_ELEMENTS = 256; struct block_intersector_t { std::vector<posting_list_t*> plists; std::vector<posting_list_t*> expanded_plists; result_iter_state_t& iter_state; block_intersector_t(const std::vector<void*>& raw_posting_lists, result_iter_state_t& iter_state): iter_state(iter_state) { to_expanded_plists(raw_posting_lists, plists, expanded_plists); if(plists.size() > 1) { std::sort(this->plists.begin(), this->plists.end(), [](posting_list_t* a, posting_list_t* b) { return a->num_blocks() < b->num_blocks(); }); } } ~block_intersector_t() { for(auto expanded_plist: expanded_plists) { delete expanded_plist; } } template<class T> bool intersect(T func); }; 
static void to_expanded_plists(const std::vector<void*>& raw_posting_lists, std::vector<posting_list_t*>& plists, std::vector<posting_list_t*>& expanded_plists); static void upsert(void*& obj, uint32_t id, const std::vector<uint32_t>& offsets); static void erase(void*& obj, uint32_t id); static void destroy_list(void*& obj); static uint32_t num_ids(const void* obj); static uint32_t first_id(const void* obj); static bool contains(const void* obj, uint32_t id); static bool contains_atleast_one(const void* obj, const uint32_t* target_ids, size_t target_ids_size); static void merge(const std::vector<void*>& posting_lists, std::vector<uint32_t>& result_ids); static void intersect(const std::vector<void*>& posting_lists, std::vector<uint32_t>& result_ids, const uint32_t& context_ids_length = 0, const uint32_t* context_ids = nullptr); static void get_array_token_positions( uint32_t id, const std::vector<void*>& posting_lists, std::map<size_t, std::vector<token_positions_t>>& array_token_positions ); static void get_exact_matches(const std::vector<void*>& raw_posting_lists, bool field_is_array, const uint32_t* ids, uint32_t num_ids, uint32_t*& exact_ids, size_t& num_exact_ids); static void get_phrase_matches(const std::vector<void*>& raw_posting_lists, bool field_is_array, const uint32_t* ids, uint32_t num_ids, uint32_t*& phrase_ids, size_t& num_phrase_ids); static void get_matching_array_indices(const std::vector<void*>& raw_posting_lists, uint32_t id, std::vector<size_t>& indices); static void get_or_iterator(void*& raw_posting_lists, std::vector<or_iterator_t>& or_iterators, std::vector<posting_list_t*>& expanded_plists); }; template<class T> bool posting_t::block_intersector_t::intersect(T func) { if(plists.empty()) { return true; } std::vector<posting_list_t::iterator_t> its; its.reserve(plists.size()); for(const auto& posting_list: plists) { its.push_back(posting_list->new_iterator()); } posting_list_t::block_intersect<T>(its, iter_state, func); return true; }
4,839
C++
.h
97
40.103093
117
0.614158
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,877
vq_model_manager.h
typesense_typesense/include/vq_model_manager.h
#pragma once #include <unordered_map> #include "vq_model.h" #include "json.hpp" #include "embedder_manager.h" class VQModelManager { private: inline static const std::string MODELS_REPO_URL = "https://models.typesense.org/public/"; static const std::string get_model_url(const std::string& model_name); static const std::string get_config_url(); static const Option<nlohmann::json> get_config(); static const std::string get_absolute_model_path(const std::string& model_name); static const std::string get_model_namespace(const std::string& model_name); static const std::string get_model_name_without_namespace(const std::string& model_name); Option<bool> download_model(const std::string& model_name); std::unordered_map<std::string, std::shared_ptr<VQModel>> models; VQModelManager() = default; std::shared_mutex models_mutex; std::mutex download_mutex; public: static VQModelManager& get_instance() { static VQModelManager instance; return instance; } VQModelManager(VQModelManager&&) = delete; VQModelManager& operator=(VQModelManager&&) = delete; VQModelManager(const VQModelManager&) = delete; VQModelManager& operator=(const VQModelManager&) = delete; Option<std::shared_ptr<VQModel>> validate_and_init_model(const std::string& model_name); Option<std::shared_ptr<VQModel>> get_model(const std::string& model_name); void delete_model(const std::string& model_name); void delete_all_models(); ~VQModelManager(); void clear_unused_models(); };
1,681
C++
.h
35
40.428571
97
0.677636
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,878
art.h
typesense_typesense/include/art.h
#pragma once #include <stdint.h> #include <stdbool.h> #include <vector> #include <set> #include "array.h" #include "sorted_array.h" #include "filter_result_iterator.h" #include "filter.h" #define IGNORE_PRINTF 1 #ifdef __cplusplus extern "C" { #endif #define NODE4 1 #define NODE16 2 #define NODE48 3 #define NODE256 4 #define MAX_PREFIX_LEN 8 #define MAX(x, y) (((x) > (y)) ? (x) : (y)) #if defined(__GNUC__) && !defined(__clang__) # if __STDC_VERSION__ >= 199901L && 402 == (__GNUC__ * 100 + __GNUC_MINOR__) /* * GCC 4.2.2's C99 inline keyword support is pretty broken; avoid. Introduced in * GCC 4.2.something, fixed in 4.3.0. So checking for specific major.minor of * 4.2 is fine. */ # define BROKEN_GCC_C99_INLINE # endif #endif typedef int(*art_callback)(void *data, const unsigned char *key, uint32_t key_len, void *value); /** * This struct is included as part * of all the various node sizes */ typedef struct { uint8_t type; uint8_t num_children; uint8_t partial_len; unsigned char partial[MAX_PREFIX_LEN]; int64_t max_score; } art_node; /** * Small node with only 4 children */ typedef struct { art_node n; unsigned char keys[4]; art_node *children[4]; } art_node4; /** * Node with 16 children */ typedef struct { art_node n; unsigned char keys[16]; art_node *children[16]; } art_node16; /** * Node with 48 children, but * a full 256 byte field. */ typedef struct { art_node n; unsigned char keys[256]; art_node *children[48]; } art_node48; /** * Full node with 256 children */ typedef struct { art_node n; art_node *children[256]; } art_node256; /** * Container for holding the documents that belong to a leaf. */ typedef struct { sorted_array ids; sorted_array offset_index; array offsets; } art_values; /** * Represents a leaf. These are * of arbitrary size, as they include the key. 
*/ typedef struct { uint32_t key_len; int64_t max_score; void* values; unsigned char key[]; } art_leaf; struct token_leaf { art_leaf* leaf; bool is_prefix; uint32_t root_len; uint32_t num_typos; token_leaf(art_leaf* leaf, uint32_t root_len, uint32_t num_typos, bool is_prefix) : leaf(leaf), root_len(root_len), num_typos(num_typos), is_prefix(is_prefix) { } }; /** * Main struct, points to root. */ typedef struct { art_node *root; uint64_t size; } art_tree; /* * Represents a document to be indexed. * `offsets` refer to the index locations where a token appeared in the document */ struct art_document { const uint32_t id; const int64_t score; const std::vector<uint32_t> offsets; art_document(const uint32_t id, const int64_t score, const std::vector<uint32_t>& offsets): id(id), score(score), offsets(offsets) { } }; enum token_ordering { NOT_SET, FREQUENCY, MAX_SCORE }; /** * Initializes an ART tree * @return 0 on success. */ int art_tree_init(art_tree *t); /** * DEPRECATED * Initializes an ART tree * @return 0 on success. */ #define init_art_tree(...) art_tree_init(__VA_ARGS__) /** * Destroys an ART tree * @return 0 on success. */ int art_tree_destroy(art_tree *t); /** * DEPRECATED * Initializes an ART tree * @return 0 on success. */ #define destroy_art_tree(...) art_tree_destroy(__VA_ARGS__) /** * Returns the size of the ART tree. */ #ifdef BROKEN_GCC_C99_INLINE # define art_size(t) ((t)->size) #else inline uint64_t art_size(art_tree *t) { return t->size; } #endif /** * Inserts a new value into the ART tree * @arg t The tree * @arg key The key * @arg key_len The length of the key * @arg value Opaque value. * @return NULL if the item was newly inserted, otherwise * the old value pointer is returned. 
*/ void* art_insert(art_tree *t, const unsigned char *key, int key_len, art_document* document); /* Insert multiple docs sharing the same key */ void* art_inserts(art_tree *t, const unsigned char *key, int key_len, const int64_t docs_max_score, std::vector<art_document>& documents); /** * Deletes a value from the ART tree * @arg t The tree * @arg key The key * @arg key_len The length of the key * @return NULL if the item was not found, otherwise * the value pointer is returned. */ void* art_delete(art_tree *t, const unsigned char *key, int key_len); /** * Searches for a value in the ART tree * @arg t The tree * @arg key The key * @arg key_len The length of the key * @return NULL if the item was not found, otherwise * the value pointer is returned. */ void* art_search(const art_tree *t, const unsigned char *key, int key_len); /** * Returns the minimum valued leaf * @return The minimum leaf or NULL */ art_leaf* art_minimum(art_tree *t); /** * Returns the maximum valued leaf * @return The maximum leaf or NULL */ art_leaf* art_maximum(art_tree *t); /** * Iterates through the entries pairs in the map, * invoking a callback for each. The call back gets a * key, value for each and returns an integer stop value. * If the callback returns non-zero, then the iteration stops. * @arg t The tree to iterate over * @arg cb The callback function to invoke * @arg data Opaque handle passed to the callback * @return 0 on success, or the return of the callback. */ int art_iter(art_tree *t, art_callback cb, void *data); /** * Iterates through the entries pairs in the map, * invoking a callback for each that matches a given prefix. * The call back gets a key, value for each and returns an integer stop value. * If the callback returns non-zero, then the iteration stops. 
* @arg t The tree to iterate over * @arg prefix The prefix of keys to read * @arg prefix_len The length of the prefix * @arg cb The callback function to invoke * @arg data Opaque handle passed to the callback * @return 0 on success, or the return of the callback. */ int art_iter_prefix(art_tree *t, const unsigned char *prefix, int prefix_len, art_callback cb, void *data); /** * Returns leaves that match a given string within a fuzzy distance of max_cost. */ int art_fuzzy_search(art_tree *t, const unsigned char *term, const int term_len, const int min_cost, const int max_cost, const size_t max_words, const token_ordering token_order, const bool prefix, bool last_token, const std::string& prev_token, const uint32_t *filter_ids, const size_t filter_ids_length, std::vector<art_leaf *> &results, std::set<std::string>& exclude_leaves); int art_fuzzy_search_i(art_tree *t, const unsigned char *term, const int term_len, const int min_cost, const int max_cost, const size_t max_words, const token_ordering token_order, const bool prefix, bool last_token, const std::string& prev_token, filter_result_iterator_t* const filter_result_iterator, std::vector<art_leaf *> &results, std::set<std::string>& exclude_leaves); void encode_int32(int32_t n, unsigned char *chars); void encode_int64(int64_t n, unsigned char *chars); void encode_float(float n, unsigned char *chars); int art_int32_search(art_tree *t, int32_t value, NUM_COMPARATOR comparator, std::vector<const art_leaf *> &results); int art_int64_search(art_tree *t, int64_t value, NUM_COMPARATOR comparator, std::vector<const art_leaf *> &results); int art_float_search(art_tree *t, float value, NUM_COMPARATOR comparator, std::vector<const art_leaf *> &results); #ifdef __cplusplus } #endif
7,562
C++
.h
243
28.032922
122
0.690404
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false
3,879
array_base.h
typesense_typesense/include/array_base.h
#pragma once #include <stdio.h> #include <cstdlib> #include <for.h> #include <cstring> #include <limits> #include <iostream> #define FOR_GROWTH_FACTOR 1.3 #define FOR_ELE_SIZE sizeof(uint32_t) #define METADATA_OVERHEAD 5 class array_base { protected: uint8_t* in = nullptr; uint32_t size_bytes = 0; // allocated size uint32_t length_bytes = 0; // actual size uint32_t length = 0; uint32_t min = std::numeric_limits<uint32_t>::max(); uint32_t max = std::numeric_limits<uint32_t>::min(); static inline uint32_t required_bits(const uint32_t v) { return (uint32_t) (v == 0 ? 0 : 32 - __builtin_clz(v)); } public: explicit array_base(const uint32_t n=2) { size_bytes = METADATA_OVERHEAD + (n * FOR_ELE_SIZE); in = (uint8_t *) malloc(size_bytes * sizeof *in); memset(in, 0, size_bytes); } ~array_base() { free(in); in = nullptr; } // len determines length of output buffer (default: length of input) uint32_t* uncompress(uint32_t len=0) const; uint32_t getSizeInBytes(); uint32_t getLength() const; uint32_t getMin() const; uint32_t getMax() const; };
1,180
C++
.h
38
26.631579
72
0.64576
typesense/typesense
20,571
633
548
GPL-3.0
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
false
false
false
false
false
false
false
false