diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 00000000..a1dc4eac
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,18 @@
+
+# Upgrade for a new version
+
+Here is a checklist for upgrading the Rust Qdrant client to a new version of the Qdrant server:
+
+- [ ] Switch to a new branch off `dev`. Something like `v1-XX-upgrade` is good enough.
+- [ ] Synchronize the protobuf definitions using the `./tools/sync_proto.sh` script.
+- [ ] Run `cargo test protos` to verify that the auto-generated code is up to date.
+
+Based on the protobuf changes, update the following places:
+
+- [ ] If there are new APIs, add them to the appropriate file in `src/qdrant_client` and make them part of `impl Qdrant`.
+- [ ] If there are new parameters for existing APIs, add them to the appropriate builders in `src/qdrant_client/builders`.
+- [ ] If there are new structures that require complicated construction, add simplified versions to `src/qdrant_client/conversions`, for example `impl From<&[f32]> for Vector` (see the sketch below).
+- [ ] Add examples that exercise all new changes to `tests/snippets`, similar to the existing ones.
+
+
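As an illustration of the conversion pattern referenced in the checklist (`impl From<&[f32]> for Vector`), here is a minimal, self-contained sketch. The `Vector` struct below is a simplified stand-in for the crate's generated type, not the real definition:

```rust
/// Simplified stand-in for the generated `Vector` type, used here
/// only to illustrate the conversion pattern.
#[derive(Debug, Default)]
pub struct Vector {
    pub data: Vec<f32>,
}

/// Convenience conversion in the style of the existing
/// `impl From<&[f32]> for Vector` in `src/qdrant_client/conversions`.
impl From<&[f32]> for Vector {
    fn from(values: &[f32]) -> Self {
        Vector {
            data: values.to_vec(),
        }
    }
}

fn main() {
    // Callers can now pass a plain slice wherever a `Vector` is expected.
    let vector: Vector = [0.1, 0.2, 0.3].as_slice().into();
    println!("{vector:?}");
}
```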
diff --git a/Cargo.toml b/Cargo.toml
index 843b32ae..a661d317 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -43,10 +43,6 @@ uuid = ["dep:uuid"]
name = "query"
required-features = ["serde"]

-[[example]]
-name = "deprecated_search"
-required-features = ["serde"]
-
[package.metadata.docs.rs]
features = ["download_snapshots", "serde"]
no-default-features = true
diff --git a/examples/deprecated_search.rs b/examples/deprecated_search.rs
deleted file mode 100644
index 162332e7..00000000
--- a/examples/deprecated_search.rs
+++ /dev/null
@@ -1,120 +0,0 @@
-use anyhow::Result;
-#[allow(deprecated)]
-use qdrant_client::prelude::*;
-use qdrant_client::qdrant::vectors_config::Config;
-use qdrant_client::qdrant::{
-    Condition, CreateCollection, Filter, SearchPoints, VectorParams, VectorsConfig,
-};
-use serde_json::json;
-
-#[allow(deprecated)]
-#[tokio::main]
-async fn main() -> Result<()> {
-    // Example of top level client
-    // You may also use tonic-generated client from `src/qdrant.rs`
-    let config = QdrantClientConfig::from_url("http://localhost:6334");
-    let client = QdrantClient::new(Some(config))?;
-
-    let collections_list = client.list_collections().await?;
-    dbg!(collections_list);
-    // collections_list = ListCollectionsResponse {
-    //     collections: [
-    //         CollectionDescription {
-    //             name: "test",
-    //         },
-    //     ],
-    //     time: 1.78e-6,
-    // }
-
-    let collection_name = "test";
-    client.delete_collection(collection_name).await?;
-
-    client
-        .create_collection(&CreateCollection {
-            collection_name: collection_name.into(),
-            vectors_config: Some(VectorsConfig {
-                config: Some(Config::Params(VectorParams {
-                    size: 10,
-                    distance: Distance::Cosine.into(),
-                    ..Default::default()
-                })),
-            }),
-            ..Default::default()
-        })
-        .await?;
-
-    let collection_info = client.collection_info(collection_name).await?;
-    dbg!(collection_info);
-
-    let payload: Payload = json!(
-        {
-            "foo": "Bar",
-            "bar": 12,
-            "baz": {
-                "qux": "quux"
-            }
-        }
-    )
-    .try_into()
-    .unwrap();
-
-    let points = vec![PointStruct::new(0, vec![12.; 10], payload)];
-    client
-        .upsert_points_blocking(collection_name, None, points, None)
-        .await?;
-
-    let search_result = client
-        .search_points(&SearchPoints {
-            collection_name: collection_name.into(),
-            vector: vec![11.; 10],
-            filter: Some(Filter::all([Condition::matches("bar", 12)])),
-            limit: 10,
-            with_payload: Some(true.into()),
-            ..Default::default()
-        })
-        .await?;
-    dbg!(&search_result);
-    // search_result = SearchResponse {
-    //     result: [
-    //         ScoredPoint {
-    //             id: Some(
-    //                 PointId {
-    //                     point_id_options: Some(
-    //                         Num(
-    //                             0,
-    //                         ),
-    //                     ),
-    //                 },
-    //             ),
-    //             payload: {
-    //                 "bar": Value {
-    //                     kind: Some(
-    //                         IntegerValue(
-    //                             12,
-    //                         ),
-    //                     ),
-    //                 },
-    //                 "foo": Value {
-    //                     kind: Some(
-    //                         StringValue(
-    //                             "Bar",
-    //                         ),
-    //                     ),
-    //                 },
-    //             },
-    //             score: 1.0000001,
-    //             version: 0,
-    //             vectors: None,
-    //         },
-    //     ],
-    //     time: 9.5394e-5,
-    // }
-
-    let found_point = search_result.result.into_iter().next().unwrap();
-    let mut payload = found_point.payload;
-    let baz_payload = payload.remove("baz").unwrap().into_json();
-    println!("baz: {baz_payload}");
-    // baz: {"qux":"quux"}
-
-    Ok(())
-}
diff --git a/proto/collections.proto b/proto/collections.proto
index df9c27bd..92bf455a 100644
--- a/proto/collections.proto
+++ b/proto/collections.proto
@@ -4,7 +4,7 @@ package qdrant;
option csharp_namespace = "Qdrant.Client.Grpc";

import "json_with_int.proto";
-import "common.proto";
+import "qdrant_common.proto";

enum Datatype {
Default = 0;
@@ -18,19 +18,34 @@
// ---------------------------------------------

message VectorParams {
- uint64 size = 1; // Size of the vectors
- Distance distance = 2; // Distance function used for comparing vectors
- optional HnswConfigDiff hnsw_config = 3; // Configuration of vector HNSW graph. If omitted - the collection configuration will be used
- optional QuantizationConfig quantization_config = 4; // Configuration of vector quantization config. If omitted - the collection configuration will be used
- optional bool on_disk = 5; // If true - serve vectors from disk. If set to false, the vectors will be loaded in RAM.
- optional Datatype datatype = 6; // Data type of the vectors
- optional MultiVectorConfig multivector_config = 7; // Configuration for multi-vector search
+ // Size of the vectors
+ uint64 size = 1;
+ // Distance function used for comparing vectors
+ Distance distance = 2;
+ // Configuration of vector HNSW graph.
+ // If omitted - the collection configuration will be used
+ optional HnswConfigDiff hnsw_config = 3;
+ // Configuration of vector quantization.
+ // If omitted - the collection configuration will be used
+ optional QuantizationConfig quantization_config = 4;
+ // If true - serve vectors from disk.
+ // If set to false, the vectors will be loaded in RAM.
+ optional bool on_disk = 5;
+ // Data type of the vectors
+ optional Datatype datatype = 6;
+ // Configuration for multi-vector search
+ optional MultiVectorConfig multivector_config = 7;
}

message VectorParamsDiff {
- optional HnswConfigDiff hnsw_config = 1; // Update params for HNSW index. If empty object - it will be unset
- optional QuantizationConfigDiff quantization_config = 2; // Update quantization params. If none - it is left unchanged.
- optional bool on_disk = 3; // If true - serve vectors from disk. If set to false, the vectors will be loaded in RAM.
+ // Update params for HNSW index.
+ // If empty object - it will be unset
+ optional HnswConfigDiff hnsw_config = 1;
+ // Update quantization params. If none - it is left unchanged.
+ optional QuantizationConfigDiff quantization_config = 2;
+ // If true - serve vectors from disk.
+ // If set to false, the vectors will be loaded in RAM.
+ optional bool on_disk = 3; } message VectorParamsMap { @@ -57,12 +72,15 @@ message VectorsConfigDiff { enum Modifier { None = 0; - Idf = 1; // Apply Inverse Document Frequency + // Apply Inverse Document Frequency + Idf = 1; } message SparseVectorParams { - optional SparseIndexConfig index = 1; // Configuration of sparse index - optional Modifier modifier = 2; // If set - apply modifier to the vector values + // Configuration of sparse index + optional SparseIndexConfig index = 1; + // If set - apply modifier to the vector values + optional Modifier modifier = 2; } message SparseVectorConfig { @@ -74,11 +92,13 @@ enum MultiVectorComparator { } message MultiVectorConfig { - MultiVectorComparator comparator = 1; // Comparator for multi-vector search + // Comparator for multi-vector search + MultiVectorComparator comparator = 1; } message GetCollectionInfoRequest { - string collection_name = 1; // Name of the collection + // Name of the collection + string collection_name = 1; } message CollectionExistsRequest { @@ -91,23 +111,27 @@ message CollectionExists { message CollectionExistsResponse { CollectionExists result = 1; - double time = 2; // Time spent to process + // Time spent to process + double time = 2; } message ListCollectionsRequest {} message CollectionDescription { - string name = 1; // Name of the collection + // Name of the collection + string name = 1; } message GetCollectionInfoResponse { CollectionInfo result = 1; - double time = 2; // Time spent to process + // Time spent to process + double time = 2; } message ListCollectionsResponse { repeated CollectionDescription collections = 1; - double time = 2; // Time spent to process + // Time spent to process + double time = 2; } enum Distance { @@ -120,10 +144,14 @@ enum Distance { enum CollectionStatus { UnknownCollectionStatus = 0; - Green = 1; // All segments are ready - Yellow = 2; // Optimization in process - Red = 3; // Something went wrong - Grey = 4; // Optimization is pending + // All segments are ready + Green = 1; + // Optimization in process + Yellow = 2; + // Something went wrong + Red = 3; + // Optimization is pending + Grey = 4; } enum PayloadSchemaType { @@ -172,149 +200,135 @@ message CollectionWarning { } message HnswConfigDiff { - /* - Number of edges per node in the index graph. Larger the value - more accurate the search, more space required. - */ + // Number of edges per node in the index graph. + // Larger the value - more accurate the search, more space required. optional uint64 m = 1; - /* - Number of neighbours to consider during the index building. Larger the value - more accurate the search, more time required to build the index. - */ + // Number of neighbours to consider during the index building. + // Larger the value - more accurate the search, more time required to build the index. optional uint64 ef_construct = 2; - /* - Minimal size threshold (in KiloBytes) below which full-scan is preferred over HNSW search. - This measures the total size of vectors being queried against. - When the maximum estimated amount of points that a condition satisfies is smaller than - `full_scan_threshold`, the query planner will use full-scan search instead of HNSW index - traversal for better performance. - Note: 1Kb = 1 vector of size 256 - */ + // Minimal size threshold (in KiloBytes) below which full-scan is preferred over HNSW search. + // This measures the total size of vectors being queried against. 
+ // When the maximum estimated amount of points that a condition satisfies is smaller than
+ // `full_scan_threshold`, the query planner will use full-scan search instead of HNSW index
+ // traversal for better performance.
+ // Note: 1Kb = 1 vector of size 256
optional uint64 full_scan_threshold = 3;
- /*
- Number of parallel threads used for background index building.
- If 0 - automatically select from 8 to 16.
- Best to keep between 8 and 16 to prevent likelihood of building broken/inefficient HNSW graphs.
- On small CPUs, less threads are used.
- */
+ // Number of parallel threads used for background index building.
+ // If 0 - automatically select from 8 to 16.
+ // Best to keep between 8 and 16 to reduce the likelihood of building broken/inefficient HNSW graphs.
+ // On small CPUs, fewer threads are used.
optional uint64 max_indexing_threads = 4;
- /*
- Store HNSW index on disk. If set to false, the index will be stored in RAM.
- */
+ // Store HNSW index on disk. If set to false, the index will be stored in RAM.
optional bool on_disk = 5;
- /*
- Number of additional payload-aware links per node in the index graph. If not set - regular M parameter will be used.
- */
+ // Number of additional payload-aware links per node in the index graph.
+ // If not set - regular M parameter will be used.
optional uint64 payload_m = 6;
- /*
- Store copies of original and quantized vectors within the HNSW index file. Default: false.
- Enabling this option will trade the search speed for disk usage by reducing amount of
- random seeks during the search.
- Requires quantized vectors to be enabled. Multi-vectors are not supported.
- */
+ // Store copies of original and quantized vectors within the HNSW index file. Default: false.
+ // Enabling this option will trade the search speed for disk usage by reducing the amount of
+ // random seeks during the search.
+ // Requires quantized vectors to be enabled. Multi-vectors are not supported.
optional bool inline_storage = 7;
}
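For client-side context, the `HnswConfigDiff` fields above become plain optional fields on the prost-generated Rust struct. A minimal sketch of building one — field names come straight from the message above; the struct-literal style assumes the usual prost codegen:

```rust
use qdrant_client::qdrant::HnswConfigDiff;

fn main() {
    // A denser graph (`m`) and a larger build beam (`ef_construct`) trade
    // memory and indexing time for search accuracy, per the comments above.
    let hnsw_config = HnswConfigDiff {
        m: Some(32),
        ef_construct: Some(200),
        on_disk: Some(false),
        ..Default::default()
    };
    println!("{hnsw_config:?}");
}
```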

message SparseIndexConfig {
- /*
- Prefer a full scan search upto (excluding) this number of vectors.
- Note: this is number of vectors, not KiloBytes.
- */
+ // Prefer a full scan search up to (excluding) this number of vectors.
+ // Note: this is the number of vectors, not KiloBytes.
optional uint64 full_scan_threshold = 1;
- /*
- Store inverted index on disk. If set to false, the index will be stored in RAM.
- */
+ // Store inverted index on disk. If set to false, the index will be stored in RAM.
optional bool on_disk = 2;
- /*
- Datatype used to store weights in the index.
- */
+ // Datatype used to store weights in the index.
optional Datatype datatype = 3;
}

message WalConfigDiff {
- optional uint64 wal_capacity_mb = 1; // Size of a single WAL block file
- optional uint64 wal_segments_ahead = 2; // Number of segments to create in advance
- optional uint64 wal_retain_closed = 3; // Number of closed segments to retain
+ // Size of a single WAL block file
+ optional uint64 wal_capacity_mb = 1;
+ // Number of segments to create in advance
+ optional uint64 wal_segments_ahead = 2;
+ // Number of closed segments to retain
+ optional uint64 wal_retain_closed = 3;
}

message OptimizersConfigDiff {
- /*
- The minimal fraction of deleted vectors in a segment, required to perform segment optimization
- */
+ // The minimal fraction of deleted vectors in a segment, required to perform
+ // segment optimization
optional double deleted_threshold = 1;
- /*
- The minimal number of vectors in a segment, required to perform segment optimization
- */
+ // The minimal number of vectors in a segment, required to perform segment
+ // optimization
optional uint64 vacuum_min_vector_number = 2;
- /*
- Target amount of segments the optimizer will try to keep.
- Real amount of segments may vary depending on multiple parameters:
-
- - Amount of stored points.
- - Current write RPS.
-
- It is recommended to select the default number of segments as a factor of the number of search threads,
- so that each segment would be handled evenly by one of the threads.
- */
+ // Target amount of segments the optimizer will try to keep.
+ // Real amount of segments may vary depending on multiple parameters:
+ //
+ // - Amount of stored points.
+ // - Current write RPS.
+ //
+ // It is recommended to select the default number of segments as a factor of the number of search threads,
+ // so that each segment would be handled evenly by one of the threads.
optional uint64 default_segment_number = 3;
- /*
- Deprecated:
-
- Do not create segments larger this size (in kilobytes).
- Large segments might require disproportionately long indexation times,
- therefore it makes sense to limit the size of segments.
-
- If indexing speed is more important - make this parameter lower.
- If search speed is more important - make this parameter higher.
- Note: 1Kb = 1 vector of size 256
- If not set, will be automatically selected considering the number of available CPUs.
- */
+ // Deprecated:
+ //
+ // Do not create segments larger than this size (in kilobytes).
+ // Large segments might require disproportionately long indexation times,
+ // therefore it makes sense to limit the size of segments.
+ //
+ // If indexing speed is more important - make this parameter lower.
+ // If search speed is more important - make this parameter higher.
+ // Note: 1Kb = 1 vector of size 256
+ // If not set, will be automatically selected considering the number of available CPUs.
optional uint64 max_segment_size = 4;
- /*
- Maximum size (in kilobytes) of vectors to store in-memory per segment.
- Segments larger than this threshold will be stored as read-only memmapped file.
-
- Memmap storage is disabled by default, to enable it, set this threshold to a reasonable value.
-
- To disable memmap storage, set this to `0`.
-
- Note: 1Kb = 1 vector of size 256
- */
+ // Maximum size (in kilobytes) of vectors to store in-memory per segment.
+ // Segments larger than this threshold will be stored as read-only memmapped file.
+ //
+ // Memmap storage is disabled by default, to enable it, set this threshold to a reasonable value.
+ //
+ // To disable memmap storage, set this to `0`.
+ //
+ // Note: 1Kb = 1 vector of size 256
optional uint64 memmap_threshold = 5;
- /*
- Maximum size (in kilobytes) of vectors allowed for plain index, exceeding this threshold will enable vector indexing
-
- Default value is 20,000, based on .
-
- To disable vector indexing, set to `0`.
-
- Note: 1kB = 1 vector of size 256.
- */
+ // Maximum size (in kilobytes) of vectors allowed for plain index, exceeding
+ // this threshold will enable vector indexing
+ //
+ // Default value is 20,000, based on
+ // .
+ //
+ // To disable vector indexing, set to `0`.
+ //
+ // Note: 1kB = 1 vector of size 256.
optional uint64 indexing_threshold = 6;
- /*
- Interval between forced flushes.
- */
+ // Interval between forced flushes.
optional uint64 flush_interval_sec = 7;

// Deprecated in favor of `max_optimization_threads`
optional uint64 deprecated_max_optimization_threads = 8;

- /*
- Max number of threads (jobs) for running optimizations per shard.
- Note: each optimization job will also use `max_indexing_threads` threads by itself for index building.
- If "auto" - have no limit and choose dynamically to saturate CPU.
- If 0 - no optimization threads, optimizations will be disabled.
- */
+ // Max number of threads (jobs) for running optimizations per shard.
+ // Note: each optimization job will also use `max_indexing_threads` threads by itself for index building.
+ // If "auto" - have no limit and choose dynamically to saturate CPU.
+ // If 0 - no optimization threads, optimizations will be disabled.
optional MaxOptimizationThreads max_optimization_threads = 9;
+
+ // If this option is set, the service will try to prevent creation of large unoptimized segments.
+ // When enabled, updates may be blocked at the request level if there are unoptimized segments larger than the indexing threshold.
+ // Updates will be resumed when optimization is completed and segments are optimized below the threshold.
+ // Using this option may lead to an increased delay between submitting an update and its application.
+ // Default is disabled.
+ optional bool prevent_unoptimized = 10;
}
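Likewise, a hedged sketch of these optimizer thresholds as they might be set from Rust — again assuming prost-style codegen for `OptimizersConfigDiff`. Both thresholds are in kilobytes, where 1Kb corresponds to one vector of size 256:

```rust
use qdrant_client::qdrant::OptimizersConfigDiff;

fn main() {
    // Leave every other field unset so the server keeps its defaults.
    let optimizers_config = OptimizersConfigDiff {
        default_segment_number: Some(4),
        // 20_000 kilobytes is the documented default for `indexing_threshold`.
        indexing_threshold: Some(20_000),
        ..Default::default()
    };
    println!("{optimizers_config:?}");
}
```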

message ScalarQuantization {
- QuantizationType type = 1; // Type of quantization
- optional float quantile = 2; // Number of bits to use for quantization
- optional bool always_ram = 3; // If true - quantized vectors always will be stored in RAM, ignoring the config of main storage
+ // Type of quantization
+ QuantizationType type = 1;
+ // Quantile for quantization
+ optional float quantile = 2;
+ // If true - quantized vectors will always be stored in RAM, ignoring the config of main storage
+ optional bool always_ram = 3;
}

message ProductQuantization {
- CompressionRatio compression = 1; // Compression ratio
- optional bool always_ram = 2; // If true - quantized vectors always will be stored in RAM, ignoring the config of main storage
+ // Compression ratio
+ CompressionRatio compression = 1;
+ // If true - quantized vectors will always be stored in RAM, ignoring the config of main storage
+ optional bool always_ram = 2;
}

enum BinaryQuantizationEncoding {
@@ -324,25 +338,26 @@
}

message BinaryQuantizationQueryEncoding {
- enum Setting {
- Default = 0;
- Binary = 1;
- Scalar4Bits = 2;
- Scalar8Bits = 3;
- }
+ enum Setting {
+ Default = 0;
+ Binary = 1;
+ Scalar4Bits = 2;
+ Scalar8Bits = 3;
+ }

- oneof variant {
- Setting setting = 4;
- }
+ oneof variant {
+ Setting setting = 4;
+ }
}

message BinaryQuantization {
- optional bool always_ram = 1; // If true - quantized vectors always will be stored in RAM, ignoring the config of main storage
- optional BinaryQuantizationEncoding encoding = 2; // Binary quantization encoding method
- /*
- Asymmetric quantization configuration allows a query to have different quantization than stored vectors.
- It can increase the accuracy of search at the cost of performance.
- */
+ // If true - quantized vectors will always be stored in RAM, ignoring the config of main storage
+ optional bool always_ram = 1;
+ // Binary quantization encoding method
+ optional BinaryQuantizationEncoding encoding = 2;
+ // Asymmetric quantization configuration allows a query to have different
+ // quantization than stored vectors.
+ // It can increase the accuracy of search at the cost of performance.
optional BinaryQuantizationQueryEncoding query_encoding = 3;
}

@@ -366,30 +381,51 @@ message QuantizationConfigDiff {
}

enum ShardingMethod {
- Auto = 0; // Auto-sharding based on record ids
- Custom = 1; // Shard by user-defined key
+ // Auto-sharding based on record ids
+ Auto = 0;
+ // Shard by user-defined key
+ Custom = 1;
}

message StrictModeConfig {
- optional bool enabled = 1; // Whether strict mode is enabled for a collection or not.
- optional uint32 max_query_limit = 2; // Max allowed `limit` parameter for all APIs that don't have their own max limit.
- optional uint32 max_timeout = 3; // Max allowed `timeout` parameter.
- optional bool unindexed_filtering_retrieve = 4; // Allow usage of unindexed fields in retrieval based (e.g. search) filters.
- optional bool unindexed_filtering_update = 5; // Allow usage of unindexed fields in filtered updates (e.g. delete by payload).
- optional uint32 search_max_hnsw_ef = 6; // Max HNSW ef value allowed in search parameters.
- optional bool search_allow_exact = 7; // Whether exact search is allowed.
- optional float search_max_oversampling = 8; // Max oversampling value allowed in search
- optional uint64 upsert_max_batchsize = 9; // Max batchsize when upserting
- optional uint64 max_collection_vector_size_bytes = 10; // Max size of a collections vector storage in bytes, ignoring replicas.
- optional uint32 read_rate_limit = 11; // Max number of read operations per minute per replica
- optional uint32 write_rate_limit = 12; // Max number of write operations per minute per replica
- optional uint64 max_collection_payload_size_bytes = 13; // Max size of a collections payload storage in bytes, ignoring replicas.
- optional uint64 filter_max_conditions = 14; // Max conditions a filter can have.
- optional uint64 condition_max_size = 15; // Max size of a condition, eg. items in `MatchAny`.
- optional StrictModeMultivectorConfig multivector_config = 16; // Multivector strict mode configuration
- optional StrictModeSparseConfig sparse_config = 17; // Sparse vector strict mode configuration
- optional uint64 max_points_count = 18; // Max number of points estimated in a collection
- optional uint64 max_payload_index_count = 19; // Max number of payload indexes in a collection
+ // Whether strict mode is enabled for a collection or not.
+ optional bool enabled = 1;
+ // Max allowed `limit` parameter for all APIs that don't have their own max limit.
+ optional uint32 max_query_limit = 2;
+ // Max allowed `timeout` parameter.
+ optional uint32 max_timeout = 3;
+ // Allow usage of unindexed fields in retrieval based (e.g. search) filters.
+ optional bool unindexed_filtering_retrieve = 4;
+ // Allow usage of unindexed fields in filtered updates (e.g. delete by payload).
+ optional bool unindexed_filtering_update = 5;
+ // Max HNSW ef value allowed in search parameters.
+ optional uint32 search_max_hnsw_ef = 6;
+ // Whether exact search is allowed.
+ optional bool search_allow_exact = 7;
+ // Max oversampling value allowed in search
+ optional float search_max_oversampling = 8;
+ // Max batch size when upserting
+ optional uint64 upsert_max_batchsize = 9;
+ // Max size of a collection's vector storage in bytes, ignoring replicas.
+ optional uint64 max_collection_vector_size_bytes = 10;
+ // Max number of read operations per minute per replica
+ optional uint32 read_rate_limit = 11;
+ // Max number of write operations per minute per replica
+ optional uint32 write_rate_limit = 12;
+ // Max size of a collection's payload storage in bytes, ignoring replicas.
+ optional uint64 max_collection_payload_size_bytes = 13;
+ // Max conditions a filter can have.
+ optional uint64 filter_max_conditions = 14;
+ // Max size of a condition, e.g. items in `MatchAny`.
+ optional uint64 condition_max_size = 15;
+ // Multivector strict mode configuration
+ optional StrictModeMultivectorConfig multivector_config = 16;
+ // Sparse vector strict mode configuration
+ optional StrictModeSparseConfig sparse_config = 17;
+ // Max number of points estimated in a collection
+ optional uint64 max_points_count = 18;
+ // Max number of payload indexes in a collection
+ optional uint64 max_payload_index_count = 19;
}

message StrictModeSparseConfig {
@@ -397,7 +433,8 @@
}

message StrictModeSparse {
- optional uint64 max_length = 10; // Max length of sparse vector
+ // Max length of sparse vector
+ optional uint64 max_length = 10;
}

message StrictModeMultivectorConfig {
@@ -405,81 +442,146 @@
}

message StrictModeMultivector {
- optional uint64 max_vectors = 1; // Max number of vectors in a multivector
+ // Max number of vectors in a multivector
+ optional uint64 max_vectors = 1;
}

message CreateCollection {
- string collection_name = 1; // Name of the collection
- reserved 2; // Deprecated
- reserved 3; // Deprecated
- optional HnswConfigDiff hnsw_config = 4; // Configuration of vector index
- optional WalConfigDiff wal_config = 5; // Configuration of the Write-Ahead-Log
- optional OptimizersConfigDiff optimizers_config = 6; // Configuration of the optimizers
- optional uint32 shard_number = 7; // Number of shards in the collection, default is 1 for standalone, otherwise equal to the number of nodes. Minimum is 1
- optional bool on_disk_payload = 8; // If true - point's payload will not be stored in memory
- optional uint64 timeout = 9; // Wait timeout for operation commit in seconds, if not specified - default value will be supplied
- optional VectorsConfig vectors_config = 10; // Configuration for vectors
- optional uint32 replication_factor = 11; // Number of replicas of each shard that network tries to maintain, default = 1
- optional uint32 write_consistency_factor = 12; // How many replicas should apply the operation for us to consider it successful, default = 1
- reserved 13; // Deprecated: init_from
- optional QuantizationConfig quantization_config = 14; // Quantization configuration of vector
- optional ShardingMethod sharding_method = 15; // Sharding method
- optional SparseVectorConfig sparse_vectors_config = 16; // Configuration for sparse vectors
- optional StrictModeConfig strict_mode_config = 17; // Configuration for strict mode
- map<string, Value> metadata = 18; // Arbitrary JSON metadata for the collection
+ // Name of the collection
+ string collection_name = 1;
+ // Deprecated
+ reserved 2;
+ // Deprecated
+ reserved 3;
+ // Configuration of vector index
+ optional HnswConfigDiff hnsw_config = 4;
+ // Configuration of the Write-Ahead-Log
+ optional WalConfigDiff wal_config = 5;
+ // Configuration of the optimizers
+ optional OptimizersConfigDiff optimizers_config = 6;
+ // Number of shards in the collection, default is 1 for standalone, otherwise
+ // equal to the number of nodes. Minimum is 1
+ optional uint32 shard_number = 7;
+ // If true - point's payload will not be stored in memory
+ optional bool on_disk_payload = 8;
+ // Wait timeout for operation commit in seconds, if not specified - default
+ // value will be supplied
+ optional uint64 timeout = 9;
+ // Configuration for vectors
+ optional VectorsConfig vectors_config = 10;
+ // Number of replicas of each shard that network tries to maintain, default = 1
+ optional uint32 replication_factor = 11;
+ // How many replicas should apply the operation for us to consider it successful, default = 1
+ optional uint32 write_consistency_factor = 12;
+ // Deprecated: init_from
+ reserved 13;
+ // Quantization configuration of vector
+ optional QuantizationConfig quantization_config = 14;
+ // Sharding method
+ optional ShardingMethod sharding_method = 15;
+ // Configuration for sparse vectors
+ optional SparseVectorConfig sparse_vectors_config = 16;
+ // Configuration for strict mode
+ optional StrictModeConfig strict_mode_config = 17;
+ // Arbitrary JSON metadata for the collection
+ map<string, Value> metadata = 18;
}
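Tying the message above to the client API: a sketch of creating a collection through `Qdrant`, assuming the builder names this crate currently generates (`CreateCollectionBuilder`, `VectorParamsBuilder`) — verify against `src/qdrant_client/builders` when upgrading:

```rust
use qdrant_client::qdrant::{CreateCollectionBuilder, Distance, VectorParamsBuilder};
use qdrant_client::Qdrant;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Qdrant::from_url("http://localhost:6334").build()?;

    // Only `collection_name` is required; every other `CreateCollection`
    // field above is optional and has a server-side default.
    client
        .create_collection(
            CreateCollectionBuilder::new("test")
                .vectors_config(VectorParamsBuilder::new(10, Distance::Cosine))
                .on_disk_payload(true),
        )
        .await?;
    Ok(())
}
```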

message UpdateCollection {
- string collection_name = 1; // Name of the collection
- optional OptimizersConfigDiff optimizers_config = 2; // New configuration parameters for the collection. This operation is blocking, it will only proceed once all current optimizations are complete
- optional uint64 timeout = 3; // Wait timeout for operation commit in seconds if blocking, if not specified - default value will be supplied
- optional CollectionParamsDiff params = 4; // New configuration parameters for the collection
- optional HnswConfigDiff hnsw_config = 5; // New HNSW parameters for the collection index
- optional VectorsConfigDiff vectors_config = 6; // New vector parameters
- optional QuantizationConfigDiff quantization_config = 7; // Quantization configuration of vector
- optional SparseVectorConfig sparse_vectors_config = 8; // New sparse vector parameters
- optional StrictModeConfig strict_mode_config = 9; // New strict mode configuration
- map<string, Value> metadata = 10; // Arbitrary JSON-like metadata for the collection, will be merged with already stored metadata
+ // Name of the collection
+ string collection_name = 1;
+ // New configuration parameters for the collection.
+ // This operation is blocking, it will only proceed once all current
+ // optimizations are complete
+ optional OptimizersConfigDiff optimizers_config = 2;
+ // Wait timeout for operation commit in seconds if blocking.
+ // If not specified - default value will be supplied.
+ optional uint64 timeout = 3;
+ // New configuration parameters for the collection
+ optional CollectionParamsDiff params = 4;
+ // New HNSW parameters for the collection index
+ optional HnswConfigDiff hnsw_config = 5;
+ // New vector parameters
+ optional VectorsConfigDiff vectors_config = 6;
+ // Quantization configuration of vector
+ optional QuantizationConfigDiff quantization_config = 7;
+ // New sparse vector parameters
+ optional SparseVectorConfig sparse_vectors_config = 8;
+ // New strict mode configuration
+ optional StrictModeConfig strict_mode_config = 9;
+ // Arbitrary JSON-like metadata for the collection, will be merged with
+ // already stored metadata
+ map<string, Value> metadata = 10;
}

message DeleteCollection {
- string collection_name = 1; // Name of the collection
- optional uint64 timeout = 2; // Wait timeout for operation commit in seconds, if not specified - default value will be supplied
+ // Name of the collection
+ string collection_name = 1;
+ // Wait timeout for operation commit in seconds.
+ // If not specified - default value will be supplied.
+ optional uint64 timeout = 2;
}

message CollectionOperationResponse {
- bool result = 1; // if operation made changes
- double time = 2; // Time spent to process
+ // If the operation made changes
+ bool result = 1;
+ // Time spent to process
+ double time = 2;
}

message CollectionParams {
- reserved 1; // Deprecated
- reserved 2; // Deprecated
- uint32 shard_number = 3; // Number of shards in collection
- bool on_disk_payload = 4; // If true - point's payload will not be stored in memory
- optional VectorsConfig vectors_config = 5; // Configuration for vectors
- optional uint32 replication_factor = 6; // Number of replicas of each shard that network tries to maintain
- optional uint32 write_consistency_factor = 7; // How many replicas should apply the operation for us to consider it successful
- optional uint32 read_fan_out_factor = 8; // Fan-out every read request to these many additional remote nodes (and return first available response)
- optional ShardingMethod sharding_method = 9; // Sharding method
- optional SparseVectorConfig sparse_vectors_config = 10; // Configuration for sparse vectors
+ // Deprecated
+ reserved 1;
+ // Deprecated
+ reserved 2;
+ // Number of shards in collection
+ uint32 shard_number = 3;
+ // If true - point's payload will not be stored in memory
+ bool on_disk_payload = 4;
+ // Configuration for vectors
+ optional VectorsConfig vectors_config = 5;
+ // Number of replicas of each shard that network tries to maintain
+ optional uint32 replication_factor = 6;
+ // How many replicas should apply the operation for us to consider it successful
+ optional uint32 write_consistency_factor = 7;
+ // Fan-out every read request to these many additional remote nodes (and return first available response)
+ optional uint32 read_fan_out_factor = 8;
+ // Sharding method
+ optional ShardingMethod sharding_method = 9;
+ // Configuration for sparse vectors
+ optional SparseVectorConfig sparse_vectors_config = 10;
+ // Number of milliseconds to wait before attempting to read from another replica.
+ optional uint64 read_fan_out_delay_ms = 11;
}

message CollectionParamsDiff {
- optional uint32 replication_factor = 1; // Number of replicas of each shard that network tries to maintain
- optional uint32 write_consistency_factor = 2; // How many replicas should apply the operation for us to consider it successful
- optional bool on_disk_payload = 3; // If true - point's payload will not be stored in memory
- optional uint32 read_fan_out_factor = 4; // Fan-out every read request to these many additional remote nodes (and return first available response)
+ // Number of replicas of each shard that network tries to maintain
+ optional uint32 replication_factor = 1;
+ // How many replicas should apply the operation for us to consider it successful
+ optional uint32 write_consistency_factor = 2;
+ // If true - point's payload will not be stored in memory
+ optional bool on_disk_payload = 3;
+ // Fan-out every read request to these many additional remote nodes (and return first available response)
+ optional uint32 read_fan_out_factor = 4;
+ // Number of milliseconds to wait before attempting to read from another replica.
+ optional uint64 read_fan_out_delay_ms = 5;
}

message CollectionConfig {
- CollectionParams params = 1; // Collection parameters
- HnswConfigDiff hnsw_config = 2; // Configuration of vector index
- OptimizersConfigDiff optimizer_config = 3; // Configuration of the optimizers
- WalConfigDiff wal_config = 4; // Configuration of the Write-Ahead-Log
- optional QuantizationConfig quantization_config = 5; // Configuration of the vector quantization
- optional StrictModeConfig strict_mode_config = 6; // Configuration of strict mode.
- map<string, Value> metadata = 7; // Arbitrary JSON metadata for the collection
+ // Collection parameters
+ CollectionParams params = 1;
+ // Configuration of vector index
+ HnswConfigDiff hnsw_config = 2;
+ // Configuration of the optimizers
+ OptimizersConfigDiff optimizer_config = 3;
+ // Configuration of the Write-Ahead-Log
+ WalConfigDiff wal_config = 4;
+ // Configuration of the vector quantization
+ optional QuantizationConfig quantization_config = 5;
+ // Configuration of strict mode.
+ optional StrictModeConfig strict_mode_config = 6;
+ // Arbitrary JSON metadata for the collection
+ map<string, Value> metadata = 7;
}

enum TokenizerType {
@@ -491,103 +593,199 @@
}

message KeywordIndexParams {
- optional bool is_tenant = 1; // If true - used for tenant optimization.
- optional bool on_disk = 2; // If true - store index on disk.
+ // If true - used for tenant optimization.
+ optional bool is_tenant = 1;
+ // If true - store index on disk.
+ optional bool on_disk = 2;
+ // Enable HNSW graph building for this payload field.
+ // If true, builds additional HNSW links (needs payload_m > 0).
+ // Default: true.
+ optional bool enable_hnsw = 3;
}

message IntegerIndexParams {
- optional bool lookup = 1; // If true - support direct lookups. Default is true.
- optional bool range = 2; // If true - support ranges filters. Default is true.
- optional bool is_principal = 3; // If true - use this key to organize storage of the collection data. This option assumes that this key will be used in majority of filtered requests. Default is false.
- optional bool on_disk = 4; // If true - store index on disk. Default is false.
+ // If true - support direct lookups. Default is true.
+ optional bool lookup = 1;
+ // If true - support range filters. Default is true.
+ optional bool range = 2;
+ // If true - use this key to organize storage of the collection data.
+ // This option assumes that this key will be used in the majority of filtered requests.
+ // Default is false.
+ optional bool is_principal = 3;
+ // If true - store index on disk. Default is false.
+ optional bool on_disk = 4;
+ // Enable HNSW graph building for this payload field.
+ // If true, builds additional HNSW links (needs payload_m > 0).
+ // Default: true.
+ optional bool enable_hnsw = 5;
}

message FloatIndexParams {
- optional bool on_disk = 1; // If true - store index on disk.
- optional bool is_principal = 2; // If true - use this key to organize storage of the collection data. This option assumes that this key will be used in majority of filtered requests.
+ // If true - store index on disk.
+ optional bool on_disk = 1;
+ // If true - use this key to organize storage of the collection data.
+ // This option assumes that this key will be used in the majority of filtered requests.
+ optional bool is_principal = 2;
+ // Enable HNSW graph building for this payload field.
+ // If true, builds additional HNSW links (needs payload_m > 0).
+ // Default: true.
+ optional bool enable_hnsw = 3;
}

message GeoIndexParams {
- optional bool on_disk = 1; // If true - store index on disk.
+ // If true - store index on disk.
+ optional bool on_disk = 1;
+ // Enable HNSW graph building for this payload field.
+ // If true, builds additional HNSW links (needs payload_m > 0).
+ // Default: true.
+ optional bool enable_hnsw = 2;
}

message StopwordsSet {
- repeated string languages = 1; // List of languages to use stopwords from
- repeated string custom = 2; // List of custom stopwords
+ // List of languages to use stopwords from
+ repeated string languages = 1;
+ // List of custom stopwords
+ repeated string custom = 2;
}

message TextIndexParams {
- TokenizerType tokenizer = 1; // Tokenizer type
- optional bool lowercase = 2; // If true - all tokens will be lowercase
- optional uint64 min_token_len = 3; // Minimal token length
- optional uint64 max_token_len = 4; // Maximal token length
- optional bool on_disk = 5; // If true - store index on disk.
- optional StopwordsSet stopwords = 6; // Stopwords for the text index
- optional bool phrase_matching = 7; // If true - support phrase matching.
- optional StemmingAlgorithm stemmer = 8; // Set an algorithm for stemming.
- optional bool ascii_folding = 9; // If true, normalize tokens by folding accented characters to ASCII (e.g., "ação" -> "acao"). Default: false.
+ // Tokenizer type
+ TokenizerType tokenizer = 1;
+ // If true - all tokens will be lowercase
+ optional bool lowercase = 2;
+ // Minimal token length
+ optional uint64 min_token_len = 3;
+ // Maximal token length
+ optional uint64 max_token_len = 4;
+ // If true - store index on disk.
+ optional bool on_disk = 5;
+ // Stopwords for the text index
+ optional StopwordsSet stopwords = 6;
+ // If true - support phrase matching.
+ optional bool phrase_matching = 7;
+ // Set an algorithm for stemming.
+ optional StemmingAlgorithm stemmer = 8;
+ // If true, normalize tokens by folding accented characters to ASCII (e.g., "ação" -> "acao").
+ // Default: false.
+ optional bool ascii_folding = 9;
+ // Enable HNSW graph building for this payload field.
+ // If true, builds additional HNSW links (needs payload_m > 0).
+ // Default: true.
+ optional bool enable_hnsw = 10;
}

message StemmingAlgorithm {
oneof stemming_params {
- SnowballParams snowball = 1; // Parameters for snowball stemming
+ // Parameters for snowball stemming
+ SnowballParams snowball = 1;
}
}

message SnowballParams {
- string language = 1; // Which language the algorithm should stem.
+ // Which language the algorithm should stem.
+ string language = 1;
}

message BoolIndexParams {
- optional bool on_disk = 1; // If true - store index on disk.
+ // If true - store index on disk.
+ optional bool on_disk = 1;
+ // Enable HNSW graph building for this payload field.
+ // If true, builds additional HNSW links (needs payload_m > 0).
+ // Default: true.
+ optional bool enable_hnsw = 2;
}

message DatetimeIndexParams {
- optional bool on_disk = 1; // If true - store index on disk.
- optional bool is_principal = 2; // If true - use this key to organize storage of the collection data. This option assumes that this key will be used in majority of filtered requests.
+ // If true - store index on disk.
+ optional bool on_disk = 1;
+ // If true - use this key to organize storage of the collection data.
+ // This option assumes that this key will be used in the majority of filtered requests.
+ optional bool is_principal = 2;
+ // Enable HNSW graph building for this payload field.
+ // If true, builds additional HNSW links (needs payload_m > 0).
+ // Default: true.
+ optional bool enable_hnsw = 3;
}

message UuidIndexParams {
- optional bool is_tenant = 1; // If true - used for tenant optimization.
- optional bool on_disk = 2; // If true - store index on disk.
+ // If true - used for tenant optimization.
+ optional bool is_tenant = 1;
+ // If true - store index on disk.
+ optional bool on_disk = 2;
+ // Enable HNSW graph building for this payload field.
+ // If true, builds additional HNSW links (needs payload_m > 0).
+ // Default: true.
+ optional bool enable_hnsw = 3;
}

message PayloadIndexParams {
oneof index_params {
- KeywordIndexParams keyword_index_params = 3; // Parameters for keyword index
- IntegerIndexParams integer_index_params = 2; // Parameters for integer index
- FloatIndexParams float_index_params = 4; // Parameters for float index
- GeoIndexParams geo_index_params = 5; // Parameters for geo index
- TextIndexParams text_index_params = 1; // Parameters for text index
- BoolIndexParams bool_index_params = 6; // Parameters for bool index
- DatetimeIndexParams datetime_index_params = 7; // Parameters for datetime index
- UuidIndexParams uuid_index_params = 8; // Parameters for uuid index
+ // Parameters for keyword index
+ KeywordIndexParams keyword_index_params = 3;
+ // Parameters for integer index
+ IntegerIndexParams integer_index_params = 2;
+ // Parameters for float index
+ FloatIndexParams float_index_params = 4;
+ // Parameters for geo index
+ GeoIndexParams geo_index_params = 5;
+ // Parameters for text index
+ TextIndexParams text_index_params = 1;
+ // Parameters for bool index
+ BoolIndexParams bool_index_params = 6;
+ // Parameters for datetime index
+ DatetimeIndexParams datetime_index_params = 7;
+ // Parameters for uuid index
+ UuidIndexParams uuid_index_params = 8;
}
}
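For orientation, creating one of these payload indexes from the client side might look like the following. `CreateFieldIndexCollectionBuilder` and `FieldType` are the names used by recent versions of this crate, but treat them as assumptions and check the generated builders; per-type parameters such as `KeywordIndexParams` can be attached through the corresponding builder as well:

```rust
use qdrant_client::qdrant::{CreateFieldIndexCollectionBuilder, FieldType};
use qdrant_client::Qdrant;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Qdrant::from_url("http://localhost:6334").build()?;

    // Index the "city" payload field of collection "test" as a keyword,
    // which corresponds to `keyword_index_params` in the oneof above.
    client
        .create_field_index(CreateFieldIndexCollectionBuilder::new(
            "test",
            "city",
            FieldType::Keyword,
        ))
        .await?;
    Ok(())
}
```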

message PayloadSchemaInfo {
- PayloadSchemaType data_type = 1; // Field data type
- optional PayloadIndexParams params = 2; // Field index parameters
- optional uint64 points = 3; // Number of points indexed within this field indexed
+ // Field data type
+ PayloadSchemaType data_type = 1;
+ // Field index parameters
+ optional PayloadIndexParams params = 2;
+ // Number of points indexed within this field
+ optional uint64 points = 3;
+}
+
+message UpdateQueueInfo {
+ // Number of elements in the queue
+ uint64 length = 1;
}

message CollectionInfo {
- CollectionStatus status = 1; // operating condition of the collection
- OptimizerStatus optimizer_status = 2; // status of collection optimizers
- reserved 3; // Deprecated
- uint64 segments_count = 4; // Number of independent segments
- reserved 5; // Deprecated
- reserved 6; // Deprecated
- CollectionConfig config = 7; // Configuration
- map<string, PayloadSchemaInfo> payload_schema = 8; // Collection data types
- optional uint64 points_count = 9; // Approximate number of points in the collection
- optional uint64 indexed_vectors_count = 10; // Approximate number of indexed vectors in the collection.
- repeated CollectionWarning warnings = 11; // Warnings related to the collection
+ // operating condition of the collection
+ CollectionStatus status = 1;
+ // status of collection optimizers
+ OptimizerStatus optimizer_status = 2;
+ // Deprecated
+ reserved 3;
+ // Number of independent segments
+ uint64 segments_count = 4;
+ // Deprecated
+ reserved 5;
+ // Deprecated
+ reserved 6;
+ // Configuration
+ CollectionConfig config = 7;
+ // Collection data types
+ map<string, PayloadSchemaInfo> payload_schema = 8;
+ // Approximate number of points in the collection
+ optional uint64 points_count = 9;
+ // Approximate number of indexed vectors in the collection.
+ optional uint64 indexed_vectors_count = 10;
+ // Warnings related to the collection
+ repeated CollectionWarning warnings = 11;
+ // Update queue info
+ UpdateQueueInfo update_queue = 12;
}

message ChangeAliases {
- repeated AliasOperations actions = 1; // List of actions
- optional uint64 timeout = 2; // Wait timeout for operation commit in seconds, if not specified - default value will be supplied
+ // List of actions
+ repeated AliasOperations actions = 1;
+ // Wait timeout for operation commit in seconds.
+ // If not specified - default value will be supplied.
+ optional uint64 timeout = 2;
}

message AliasOperations {
@@ -599,79 +797,121 @@
}

message CreateAlias {
- string collection_name = 1; // Name of the collection
- string alias_name = 2; // New name of the alias
+ // Name of the collection
+ string collection_name = 1;
+ // New name of the alias
+ string alias_name = 2;
}

message RenameAlias {
- string old_alias_name = 1; // Name of the alias to rename
- string new_alias_name = 2; // Name of the alias
+ // Name of the alias to rename
+ string old_alias_name = 1;
+ // Name of the alias
+ string new_alias_name = 2;
}

message DeleteAlias {
- string alias_name = 1; // Name of the alias
+ // Name of the alias
+ string alias_name = 1;
}

message ListAliasesRequest {}

message ListCollectionAliasesRequest {
- string collection_name = 1; // Name of the collection
+ // Name of the collection
+ string collection_name = 1;
}

message AliasDescription {
- string alias_name = 1; // Name of the alias
- string collection_name = 2; // Name of the collection
+ // Name of the alias
+ string alias_name = 1;
+ // Name of the collection
+ string collection_name = 2;
}

message ListAliasesResponse {
repeated AliasDescription aliases = 1;
- double time = 2; // Time spent to process
+ // Time spent to process
+ double time = 2;
}

message CollectionClusterInfoRequest {
- string collection_name = 1; // Name of the collection
+ // Name of the collection
+ string collection_name = 1;
}

enum ReplicaState {
- Active = 0; // Active and sound
- Dead = 1; // Failed for some reason
- Partial = 2; // The shard is partially loaded and is currently receiving data from other shards
- Initializing = 3; // Collection is being created
- Listener = 4; // A shard which receives data, but is not used for search; Useful for backup shards
- PartialSnapshot = 5; // Deprecated: snapshot shard transfer is in progress; Updates should not be sent to (and are ignored by) the shard
- Recovery = 6; // Shard is undergoing recovered by an external node; Normally rejects updates, accepts updates if force is true
- Resharding = 7; // Points are being migrated to this shard as part of scale-up resharding
- ReshardingScaleDown = 8; // Points are being migrated to this shard as part of scale-down resharding
- ActiveRead = 9; // Active for readers, Partial for writers
+ // Active and sound
+ Active = 0;
+ // Failed for some reason
+ Dead = 1;
+ // The shard is partially loaded and is currently receiving data from other shards
+ Partial = 2;
+ // Collection is being created
+ Initializing = 3;
+ // A shard which receives data, but is not used for search.
+ // Useful for backup shards.
+ Listener = 4;
+ // Deprecated: snapshot shard transfer is in progress.
+ // Updates should not be sent to (and are ignored by) the shard.
+ PartialSnapshot = 5;
+ // Shard is undergoing recovery by an external node.
+ // Normally rejects updates, accepts updates if force is true.
+ Recovery = 6;
+ // Points are being migrated to this shard as part of scale-up resharding
+ Resharding = 7;
+ // Points are being migrated to this shard as part of scale-down resharding
+ ReshardingScaleDown = 8;
+ // Active for readers, Partial for writers
+ ActiveRead = 9;
+ // State for manual creation/recovery of a shard.
+ // Usually used when a snapshot is uploaded.
+ // This state is equivalent to `Partial`, except:
+ // - it can't receive updates
+ // - it is not treated as broken on startup
+ ManualRecovery = 10;
}

message ShardKey {
oneof key {
- string keyword = 1; // String key
- uint64 number = 2; // Number key
+ // String key
+ string keyword = 1;
+ // Number key
+ uint64 number = 2;
}
}

message LocalShardInfo {
- uint32 shard_id = 1; // Local shard id
- uint64 points_count = 2; // Number of points in the shard
- ReplicaState state = 3; // Is replica active
- optional ShardKey shard_key = 4; // User-defined shard key
+ // Local shard id
+ uint32 shard_id = 1;
+ // Number of points in the shard
+ uint64 points_count = 2;
+ // Is replica active
+ ReplicaState state = 3;
+ // User-defined shard key
+ optional ShardKey shard_key = 4;
}

message RemoteShardInfo {
- uint32 shard_id = 1; // Local shard id
- uint64 peer_id = 2; // Remote peer id
- ReplicaState state = 3; // Is replica active
- optional ShardKey shard_key = 4; // User-defined shard key
+ // Local shard id
+ uint32 shard_id = 1;
+ // Remote peer id
+ uint64 peer_id = 2;
+ // Is replica active
+ ReplicaState state = 3;
+ // User-defined shard key
+ optional ShardKey shard_key = 4;
}

message ShardTransferInfo {
- uint32 shard_id = 1; // Local shard id
+ // Local shard id
+ uint32 shard_id = 1;
optional uint32 to_shard_id = 5;
uint64 from = 2;
uint64 to = 3;
- bool sync = 4; // If `true` transfer is a synchronization of a replicas; If `false` transfer is a moving of a shard from one peer to another
+ // If `true` - transfer is a synchronization of replicas;
+ // If `false` - transfer is a move of a shard from one peer to another
+ bool sync = 4;
}

message ReshardingInfo {
@@ -681,25 +921,32 @@
ReshardingDirection direction = 4;
}

-/*
- Resharding direction, scale up or down in number of shards
-*/
+// Resharding direction, scale up or down in number of shards
enum ReshardingDirection {
- Up = 0; // Scale up, add a new shard
- Down = 1; // Scale down, remove a shard
+ // Scale up, add a new shard
+ Up = 0;
+ // Scale down, remove a shard
+ Down = 1;
}

message CollectionClusterInfoResponse {
- uint64 peer_id = 1; // ID of this peer
- uint64 shard_count = 2; // Total number of shards
- repeated LocalShardInfo local_shards = 3; // Local shards
- repeated RemoteShardInfo remote_shards = 4; // Remote shards
- repeated ShardTransferInfo shard_transfers = 5; // Shard transfers
- repeated ReshardingInfo resharding_operations = 6; // Resharding operations
+ // ID of this peer
+ uint64 peer_id = 1;
+ // Total number of shards
+ uint64 shard_count = 2;
+ // Local shards
+ repeated LocalShardInfo local_shards = 3;
+ // Remote shards
+ repeated RemoteShardInfo remote_shards = 4;
+ // Shard transfers
+ repeated ShardTransferInfo shard_transfers = 5;
+ // Resharding operations
+ repeated ReshardingInfo resharding_operations = 6;
}

message MoveShard {
- uint32 shard_id = 1; // Local shard id
+ // Local shard id
+ uint32 shard_id = 1;
optional uint32 to_shard_id = 5;
uint64 from_peer_id = 2;
uint64 to_peer_id = 3;
@@ -707,7 +954,8 @@
}

message ReplicateShard {
- uint32 shard_id = 1; // Local shard id
+ // Local shard id
+ uint32 shard_id = 1;
optional uint32 to_shard_id = 5;
uint64 from_peer_id = 2;
uint64 to_peer_id = 3;
@@ -715,14 +963,16 @@
}

message AbortShardTransfer {
- uint32 shard_id = 1; // Local shard id
+ // Local shard id
+ uint32 shard_id = 1;
optional uint32 to_shard_id = 4;
uint64 from_peer_id = 2;
uint64 to_peer_id = 3;
}

message RestartTransfer {
- uint32 shard_id = 1; // Local shard id
+ // Local shard id
+ uint32 shard_id = 1;
optional uint32 to_shard_id = 5;
uint64 from_peer_id = 2;
uint64 to_peer_id = 3;
@@ -730,16 +980,23 @@
}

message ReplicatePoints {
- ShardKey from_shard_key = 1; // Source shard key
- ShardKey to_shard_key = 2; // Target shard key
- optional Filter filter = 3; // If set - only points matching the filter will be replicated
+ // Source shard key
+ ShardKey from_shard_key = 1;
+ // Target shard key
+ ShardKey to_shard_key = 2;
+ // If set - only points matching the filter will be replicated
+ optional Filter filter = 3;
}

enum ShardTransferMethod {
- StreamRecords = 0; // Stream shard records in batches
- Snapshot = 1; // Snapshot the shard and recover it on the target peer
- WalDelta = 2; // Resolve WAL delta between peers and transfer the difference
- ReshardingStreamRecords = 3; // Stream shard records in batches for resharding
+ // Stream shard records in batches
+ StreamRecords = 0;
+ // Snapshot the shard and recover it on the target peer
+ Snapshot = 1;
+ // Resolve WAL delta between peers and transfer the difference
+ WalDelta = 2;
+ // Stream shard records in batches for resharding
+ ReshardingStreamRecords = 3;
}

message Replica {
@@ -748,19 +1005,26 @@
}

message CreateShardKey {
- ShardKey shard_key = 1; // User-defined shard key
- optional uint32 shards_number = 2; // Number of shards to create per shard key
- optional uint32 replication_factor = 3; // Number of replicas of each shard to create
- repeated uint64 placement = 4; // List of peer ids, allowed to create shards. If empty - all peers are allowed
- optional ReplicaState initial_state = 5; // Initial state of created replicas. Warning: use with care.
+ // User-defined shard key
+ ShardKey shard_key = 1;
+ // Number of shards to create per shard key
+ optional uint32 shards_number = 2;
+ // Number of replicas of each shard to create
+ optional uint32 replication_factor = 3;
+ // List of peer ids, allowed to create shards. If empty - all peers are allowed
+ repeated uint64 placement = 4;
+ // Initial state of created replicas. Warning: use with care.
+ optional ReplicaState initial_state = 5;
}

message DeleteShardKey {
- ShardKey shard_key = 1; // Shard key to delete
+ // Shard key to delete
+ ShardKey shard_key = 1;
}

message UpdateCollectionClusterSetupRequest {
- string collection_name = 1; // Name of the collection
+ // Name of the collection
+ string collection_name = 1;
oneof operation {
MoveShard move_shard = 2;
ReplicateShard replicate_shard = 3;
@@ -771,7 +1035,9 @@
RestartTransfer restart_transfer = 9;
ReplicatePoints replicate_points = 10;
}
- optional uint64 timeout = 6; // Wait timeout for operation commit in seconds, if not specified - default value will be supplied
+ // Wait timeout for operation commit in seconds.
+ // If not specified - default value will be supplied.
+ optional uint64 timeout = 6; } message UpdateCollectionClusterSetupResponse { @@ -779,15 +1045,28 @@ message UpdateCollectionClusterSetupResponse { } message CreateShardKeyRequest { - string collection_name = 1; // Name of the collection - CreateShardKey request = 2; // Request to create shard key - optional uint64 timeout = 3; // Wait timeout for operation commit in seconds, if not specified - default value will be supplied + // Name of the collection + string collection_name = 1; + // Request to create shard key + CreateShardKey request = 2; + // Wait timeout for operation commit in seconds. + // If not specified - default value will be supplied. + optional uint64 timeout = 3; } message DeleteShardKeyRequest { - string collection_name = 1; // Name of the collection - DeleteShardKey request = 2; // Request to delete shard key - optional uint64 timeout = 3; // Wait timeout for operation commit in seconds, if not specified - default value will be supplied + // Name of the collection + string collection_name = 1; + // Request to delete shard key + DeleteShardKey request = 2; + // Wait timeout for operation commit in seconds. + // If not specified - default value will be supplied. + optional uint64 timeout = 3; +} + +message ListShardKeysRequest { + // Name of the collection + string collection_name = 1; } message CreateShardKeyResponse { @@ -797,3 +1076,13 @@ message CreateShardKeyResponse { message DeleteShardKeyResponse { bool result = 1; } + +message ShardKeyDescription { + ShardKey key = 1; +} + +message ListShardKeysResponse { + repeated ShardKeyDescription shard_keys = 1; + // Time spent to process + double time = 2; +} diff --git a/proto/collections_service.proto b/proto/collections_service.proto index 52caa8d2..ddb9244b 100644 --- a/proto/collections_service.proto +++ b/proto/collections_service.proto @@ -6,56 +6,36 @@ package qdrant; option csharp_namespace = "Qdrant.Client.Grpc"; service Collections { - /* - Get detailed information about specified existing collection - */ - rpc Get (GetCollectionInfoRequest) returns (GetCollectionInfoResponse) {} - /* - Get list name of all existing collections - */ - rpc List (ListCollectionsRequest) returns (ListCollectionsResponse) {} - /* - Create new collection with given parameters - */ - rpc Create (CreateCollection) returns (CollectionOperationResponse) {} - /* - Update parameters of the existing collection - */ - rpc Update (UpdateCollection) returns (CollectionOperationResponse) {} - /* - Drop collection and all associated data - */ - rpc Delete (DeleteCollection) returns (CollectionOperationResponse) {} - /* - Update Aliases of the existing collection - */ - rpc UpdateAliases (ChangeAliases) returns (CollectionOperationResponse) {} - /* - Get list of all aliases for a collection - */ - rpc ListCollectionAliases (ListCollectionAliasesRequest) returns (ListAliasesResponse) {} - /* - Get list of all aliases for all existing collections - */ - rpc ListAliases (ListAliasesRequest) returns (ListAliasesResponse) {} - /* - Get cluster information for a collection - */ - rpc CollectionClusterInfo (CollectionClusterInfoRequest) returns (CollectionClusterInfoResponse) {} - /* - Check the existence of a collection - */ - rpc CollectionExists (CollectionExistsRequest) returns (CollectionExistsResponse) {} - /* - Update cluster setup for a collection - */ - rpc UpdateCollectionClusterSetup (UpdateCollectionClusterSetupRequest) returns (UpdateCollectionClusterSetupResponse) {} - /* - Create shard key - */ - rpc CreateShardKey (CreateShardKeyRequest) 
returns (CreateShardKeyResponse) {} - /* - Delete shard key - */ - rpc DeleteShardKey (DeleteShardKeyRequest) returns (DeleteShardKeyResponse) {} + // Get detailed information about specified existing collection + rpc Get(GetCollectionInfoRequest) returns (GetCollectionInfoResponse) {} + // Get list of names of all existing collections + rpc List(ListCollectionsRequest) returns (ListCollectionsResponse) {} + // Create new collection with given parameters + rpc Create(CreateCollection) returns (CollectionOperationResponse) {} + // Update parameters of the existing collection + rpc Update(UpdateCollection) returns (CollectionOperationResponse) {} + // Drop collection and all associated data + rpc Delete(DeleteCollection) returns (CollectionOperationResponse) {} + // Update Aliases of the existing collection + rpc UpdateAliases(ChangeAliases) returns (CollectionOperationResponse) {} + // Get list of all aliases for a collection + rpc ListCollectionAliases(ListCollectionAliasesRequest) + returns (ListAliasesResponse) {} + // Get list of all aliases for all existing collections + rpc ListAliases(ListAliasesRequest) returns (ListAliasesResponse) {} + // Get cluster information for a collection + rpc CollectionClusterInfo(CollectionClusterInfoRequest) + returns (CollectionClusterInfoResponse) {} + // Check the existence of a collection + rpc CollectionExists(CollectionExistsRequest) + returns (CollectionExistsResponse) {} + // Update cluster setup for a collection + rpc UpdateCollectionClusterSetup(UpdateCollectionClusterSetupRequest) + returns (UpdateCollectionClusterSetupResponse) {} + // Create shard key + rpc CreateShardKey(CreateShardKeyRequest) returns (CreateShardKeyResponse) {} + // Delete shard key + rpc DeleteShardKey(DeleteShardKeyRequest) returns (DeleteShardKeyResponse) {} + // List shard keys + rpc ListShardKeys(ListShardKeysRequest) returns (ListShardKeysResponse) {} } diff --git a/proto/common.proto b/proto/common.proto deleted file mode 100644 index 48b391bc..00000000 --- a/proto/common.proto +++ /dev/null @@ -1,141 +0,0 @@ -syntax = "proto3"; -package qdrant; - -option csharp_namespace = "Qdrant.Client.Grpc"; - -import "google/protobuf/timestamp.proto"; - -message PointId { - oneof point_id_options { - uint64 num = 1; // Numerical ID of the point - string uuid = 2; // UUID - } -} - -message GeoPoint { - double lon = 1; - double lat = 2; -} - -message Filter { - repeated Condition should = 1; // At least one of those conditions should match - repeated Condition must = 2; // All conditions must match - repeated Condition must_not = 3; // All conditions must NOT match - optional MinShould min_should = 4; // At least minimum amount of given conditions should match -} - -message MinShould { - repeated Condition conditions = 1; - uint64 min_count = 2; -} - -message Condition { - oneof condition_one_of { - FieldCondition field = 1; - IsEmptyCondition is_empty = 2; - HasIdCondition has_id = 3; - Filter filter = 4; - IsNullCondition is_null = 5; - NestedCondition nested = 6; - HasVectorCondition has_vector = 7; - } -} - -message IsEmptyCondition { - string key = 1; -} - -message IsNullCondition { - string key = 1; -} - -message HasIdCondition { - repeated PointId has_id = 1; -} - -message HasVectorCondition { - string has_vector = 1; -} - -message NestedCondition { - string key = 1; // Path to nested object - Filter filter = 2; // Filter condition -} - -message FieldCondition { - string key = 1; - Match match = 2; // Check if point has field with a given value - Range range = 3; // Check 
if points value lies in a given range - GeoBoundingBox geo_bounding_box = 4; // Check if points geolocation lies in a given area - GeoRadius geo_radius = 5; // Check if geo point is within a given radius - ValuesCount values_count = 6; // Check number of values for a specific field - GeoPolygon geo_polygon = 7; // Check if geo point is within a given polygon - DatetimeRange datetime_range = 8; // Check if datetime is within a given range - optional bool is_empty = 9; // Check if field is empty - optional bool is_null = 10; // Check if field is null -} - -message Match { - oneof match_value { - string keyword = 1; // Match string keyword - int64 integer = 2; // Match integer - bool boolean = 3; // Match boolean - string text = 4; // Match text - RepeatedStrings keywords = 5; // Match multiple keywords - RepeatedIntegers integers = 6; // Match multiple integers - RepeatedIntegers except_integers = 7; // Match any other value except those integers - RepeatedStrings except_keywords = 8; // Match any other value except those keywords - string phrase = 9; // Match phrase text - string text_any = 10; // Match any word in the text - } -} - -message RepeatedStrings { - repeated string strings = 1; -} - -message RepeatedIntegers { - repeated int64 integers = 1; -} - -message Range { - optional double lt = 1; - optional double gt = 2; - optional double gte = 3; - optional double lte = 4; -} - -message DatetimeRange { - optional google.protobuf.Timestamp lt = 1; - optional google.protobuf.Timestamp gt = 2; - optional google.protobuf.Timestamp gte = 3; - optional google.protobuf.Timestamp lte = 4; -} - -message GeoBoundingBox { - GeoPoint top_left = 1; // north-west corner - GeoPoint bottom_right = 2; // south-east corner -} - -message GeoRadius { - GeoPoint center = 1; // Center of the circle - float radius = 2; // In meters -} - -message GeoLineString { - repeated GeoPoint points = 1; // Ordered sequence of GeoPoints representing the line -} - -// For a valid GeoPolygon, both the exterior and interior GeoLineStrings must consist of a minimum of 4 points. -// Additionally, the first and last points of each GeoLineString must be the same. 
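The validity rule above is easy to trip over: even a triangle needs four points, because the closing point must repeat the first. A minimal sketch using the generated Rust types (which should be unaffected by the presumable move of this message into `qdrant_common.proto`; the coordinates are made up):

```rust
use qdrant_client::qdrant::{GeoLineString, GeoPoint, GeoPolygon};

// Smallest valid polygon: a closed triangle. Four points, because the
// first and last point of each ring must be identical.
fn closed_triangle() -> GeoPolygon {
    let ring = GeoLineString {
        points: vec![
            GeoPoint { lon: 13.38, lat: 52.51 },
            GeoPoint { lon: 13.40, lat: 52.51 },
            GeoPoint { lon: 13.39, lat: 52.53 },
            GeoPoint { lon: 13.38, lat: 52.51 }, // closes the ring
        ],
    };
    GeoPolygon {
        exterior: Some(ring),
        interiors: vec![], // no holes in the surface
    }
}
```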
-message GeoPolygon { - GeoLineString exterior = 1; // The exterior line bounds the surface - repeated GeoLineString interiors = 2; // Interior lines (if present) bound holes within the surface -} - -message ValuesCount { - optional uint64 lt = 1; - optional uint64 gt = 2; - optional uint64 gte = 3; - optional uint64 lte = 4; -} \ No newline at end of file diff --git a/proto/points.proto b/proto/points.proto index fd50fa93..b03adc3f 100644 --- a/proto/points.proto +++ b/proto/points.proto @@ -4,31 +4,52 @@ package qdrant; option csharp_namespace = "Qdrant.Client.Grpc"; import "collections.proto"; -import "common.proto"; +import "qdrant_common.proto"; import "google/protobuf/timestamp.proto"; import "json_with_int.proto"; - enum WriteOrderingType { - Weak = 0; // Write operations may be reordered, works faster, default - Medium = 1; // Write operations go through dynamically selected leader, may be inconsistent for a short period of time in case of leader change - Strong = 2; // Write operations go through the permanent leader, consistent, but may be unavailable if leader is down + // Write operations may be reordered, works faster, default + Weak = 0; + // Write operations go through dynamically selected leader, + // may be inconsistent for a short period of time in case of leader change + Medium = 1; + // Write operations go through the permanent leader, consistent, + // but may be unavailable if leader is down + Strong = 2; +} + +// Defines the mode of the upsert operation +enum UpdateMode { + // Default mode - insert new points, update existing points + Upsert = 0; + // Only insert new points, do not update existing points + InsertOnly = 1; + // Only update existing points, do not insert new points + UpdateOnly = 2; } message WriteOrdering { - WriteOrderingType type = 1; // Write ordering guarantees + // Write ordering guarantees + WriteOrderingType type = 1; } enum ReadConsistencyType { - All = 0; // Send request to all nodes and return points which are present on all of them - Majority = 1; // Send requests to all nodes and return points which are present on majority of them - Quorum = 2; // Send requests to half + 1 nodes, return points which are present on all of them + // Send request to all nodes and return points which are present on all of them + All = 0; + // Send requests to all nodes and return points which are present on majority of them + Majority = 1; + // Send requests to half + 1 nodes, return points which are present on all of them + Quorum = 2; } message ReadConsistency { oneof value { - ReadConsistencyType type = 1; // Common read consistency configurations - uint64 factor = 2; // Send request to a specified number of nodes, and return points which are present on all of them + // Common read consistency configurations + ReadConsistencyType type = 1; + // Send request to a specified number of nodes, + // and return points which are present on all of them + uint64 factor = 2; } } @@ -37,31 +58,46 @@ message SparseIndices { } message Document { - string text = 1; // Text of the document - string model = 3; // Model name - map<string, Value> options = 4; // Model options + // Text of the document + string text = 1; + // Model name + string model = 3; + // Model options + map<string, Value> options = 4; } message Image { - Value image = 1; // Image data, either base64 encoded or URL - string model = 2; // Model name - map<string, Value> options = 3; // Model options + // Image data, either base64 encoded or URL + Value image = 1; + // Model name + string model = 2; + // Model options + map<string, Value> options = 3; } message InferenceObject { - Value object = 1; // Object to infer - string model = 2; // Model name - map<string, Value> options = 3; // Model options + // Object to infer + Value object = 1; + // Model name + string model = 2; + // Model options + map<string, Value> options = 3; } message Vector { - repeated float data = 1 [deprecated=true]; // Vector data (flatten for multi vectors), deprecated - optional SparseIndices indices = 2 [deprecated=true]; // Sparse indices for sparse vectors, deprecated - optional uint32 vectors_count = 3 [deprecated=true]; // Number of vectors per multi vector, deprecated + // Vector data (flatten for multi vectors), deprecated + repeated float data = 1 [deprecated = true]; + // Sparse indices for sparse vectors, deprecated + optional SparseIndices indices = 2 [deprecated = true]; + // Number of vectors per multi vector, deprecated + optional uint32 vectors_count = 3 [deprecated = true]; oneof vector { - DenseVector dense = 101; // Dense vector - SparseVector sparse = 102; // Sparse vector - MultiDenseVector multi_dense = 103; // Multi dense vector + // Dense vector + DenseVector dense = 101; + // Sparse vector + SparseVector sparse = 102; + // Multi dense vector + MultiDenseVector multi_dense = 103; Document document = 104; Image image = 105; InferenceObject object = 106; @@ -69,13 +105,19 @@ message Vector { } message VectorOutput { - repeated float data = 1 [deprecated=true]; // Vector data (flatten for multi vectors), deprecated - optional SparseIndices indices = 2 [deprecated=true]; // Sparse indices for sparse vectors, deprecated - optional uint32 vectors_count = 3 [deprecated=true]; // Number of vectors per multi vector, deprecated + // Vector data (flatten for multi vectors), deprecated + repeated float data = 1 [deprecated = true]; + // Sparse indices for sparse vectors, deprecated + optional SparseIndices indices = 2 [deprecated = true]; + // Number of vectors per multi vector, deprecated + optional uint32 vectors_count = 3 [deprecated = true]; oneof vector { - DenseVector dense = 101; // Dense vector - SparseVector sparse = 102; // Sparse vector - MultiDenseVector multi_dense = 103; // Multi dense vector + // Dense vector + DenseVector dense = 101; + // Sparse vector + SparseVector sparse = 102; + // Multi dense vector + MultiDenseVector multi_dense = 103; } } @@ -92,7 +134,8 @@ message MultiDenseVector { repeated DenseVector vectors = 1; } -// Vector type to be used in queries. Ids will be substituted with their corresponding vectors from the collection. +// Vector type to be used in queries. +// Ids will be substituted with their corresponding vectors from the collection. message VectorInput { oneof variant { PointId id = 1; @@ -110,93 +153,162 @@ // --------------------------------------------- message ShardKeySelector { - repeated ShardKey shard_keys = 1; // List of shard keys which should be used in the request + // List of shard keys which should be used in the request + repeated ShardKey shard_keys = 1; optional ShardKey fallback = 2; } - // --------------------------------------------- // ---------------- RPC Requests --------------- // --------------------------------------------- message UpsertPoints { - string collection_name = 1; // name of the collection - optional bool wait = 2; // Wait until the changes have been applied? + // name of the collection + string collection_name = 1; + // Wait until the changes have been applied?
+ optional bool wait = 2; repeated PointStruct points = 3; - optional WriteOrdering ordering = 4; // Write ordering guarantees - optional ShardKeySelector shard_key_selector = 5; // Option for custom sharding to specify used shard keys - optional Filter update_filter = 6; // If specified, only points that match this filter will be updated, others will be inserted + // Write ordering guarantees + optional WriteOrdering ordering = 4; + // Option for custom sharding to specify used shard keys + optional ShardKeySelector shard_key_selector = 5; + // Filter to apply when updating existing points. Only points matching this filter will be updated. + // Points that don't match will keep their current state. New points will be inserted regardless of the filter. + optional Filter update_filter = 6; + // Timeout for the request in seconds + optional uint64 timeout = 7; + // Mode of the upsert operation: insert_only, upsert (default), update_only + optional UpdateMode update_mode = 8; } message DeletePoints { - string collection_name = 1; // name of the collection - optional bool wait = 2; // Wait until the changes have been applied? - PointsSelector points = 3; // Affected points - optional WriteOrdering ordering = 4; // Write ordering guarantees - optional ShardKeySelector shard_key_selector = 5; // Option for custom sharding to specify used shard keys + // name of the collection + string collection_name = 1; + // Wait until the changes have been applied? + optional bool wait = 2; + // Affected points + PointsSelector points = 3; + // Write ordering guarantees + optional WriteOrdering ordering = 4; + // Option for custom sharding to specify used shard keys + optional ShardKeySelector shard_key_selector = 5; + // Timeout for the request in seconds + optional uint64 timeout = 6; } message GetPoints { - string collection_name = 1; // name of the collection - repeated PointId ids = 2; // List of points to retrieve - reserved 3; // deprecated "with_vector" field - WithPayloadSelector with_payload = 4; // Options for specifying which payload to include or not - optional WithVectorsSelector with_vectors = 5; // Options for specifying which vectors to include into response - optional ReadConsistency read_consistency = 6; // Options for specifying read consistency guarantees - optional ShardKeySelector shard_key_selector = 7; // Specify in which shards to look for the points, if not specified - look in all shards - optional uint64 timeout = 8; // If set, overrides global timeout setting for this request. Unit is seconds. + // name of the collection + string collection_name = 1; + // List of points to retrieve + repeated PointId ids = 2; + // deprecated "with_vector" field + reserved 3; + // Options for specifying which payload to include or not + WithPayloadSelector with_payload = 4; + // Options for specifying which vectors to include into response + optional WithVectorsSelector with_vectors = 5; + // Options for specifying read consistency guarantees + optional ReadConsistency read_consistency = 6; + // Specify in which shards to look for the points, if not specified - look in all shards + optional ShardKeySelector shard_key_selector = 7; + // If set, overrides global timeout setting for this request. Unit is seconds. + optional uint64 timeout = 8; } message UpdatePointVectors { - string collection_name = 1; // name of the collection - optional bool wait = 2; // Wait until the changes have been applied? 
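The `UpsertPoints` hunk above adds a per-request `timeout` and the new `UpdateMode` enum. A minimal sketch of how the regenerated prost struct would be filled in, assuming prost's usual mapping of optional enum fields to `Option<i32>` (builder support would come later, per the checklist in CONTRIBUTING.md; the collection name and values are illustrative):

```rust
use qdrant_client::qdrant::{PointStruct, UpdateMode, UpsertPoints};

// Build an insert-only upsert: existing points are left untouched.
fn insert_only(points: Vec<PointStruct>) -> UpsertPoints {
    UpsertPoints {
        collection_name: "test".into(),
        wait: Some(true),
        points,
        // New in this sync: per-request timeout, in seconds.
        timeout: Some(5),
        // New in this sync: optional enums are generated as Option<i32>.
        update_mode: Some(UpdateMode::InsertOnly as i32),
        ..Default::default()
    }
}
```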
- repeated PointVectors points = 3; // List of points and vectors to update - optional WriteOrdering ordering = 4; // Write ordering guarantees - optional ShardKeySelector shard_key_selector = 5; // Option for custom sharding to specify used shard keys - optional Filter update_filter = 6; // If specified, only points that match this filter will be updated + // name of the collection + string collection_name = 1; + // Wait until the changes have been applied? + optional bool wait = 2; + // List of points and vectors to update + repeated PointVectors points = 3; + // Write ordering guarantees + optional WriteOrdering ordering = 4; + // Option for custom sharding to specify used shard keys + optional ShardKeySelector shard_key_selector = 5; + // If specified, only points that match this filter will be updated + optional Filter update_filter = 6; + // Timeout for the request in seconds + optional uint64 timeout = 7; } message PointVectors { - PointId id = 1; // ID to update vectors for - Vectors vectors = 2; // Named vectors to update, leave others intact + // ID to update vectors for + PointId id = 1; + // Named vectors to update, leave others intact + Vectors vectors = 2; } message DeletePointVectors { - string collection_name = 1; // name of the collection - optional bool wait = 2; // Wait until the changes have been applied? - PointsSelector points_selector = 3; // Affected points - VectorsSelector vectors = 4; // List of vector names to delete - optional WriteOrdering ordering = 5; // Write ordering guarantees - optional ShardKeySelector shard_key_selector = 6; // Option for custom sharding to specify used shard keys + // name of the collection + string collection_name = 1; + // Wait until the changes have been applied? + optional bool wait = 2; + // Affected points + PointsSelector points_selector = 3; + // List of vector names to delete + VectorsSelector vectors = 4; + // Write ordering guarantees + optional WriteOrdering ordering = 5; + // Option for custom sharding to specify used shard keys + optional ShardKeySelector shard_key_selector = 6; + // Timeout for the request in seconds + optional uint64 timeout = 7; } message SetPayloadPoints { - string collection_name = 1; // name of the collection - optional bool wait = 2; // Wait until the changes have been applied? - map<string, Value> payload = 3; // New payload values - reserved 4; // List of point to modify, deprecated - optional PointsSelector points_selector = 5; // Affected points - optional WriteOrdering ordering = 6; // Write ordering guarantees - optional ShardKeySelector shard_key_selector = 7; // Option for custom sharding to specify used shard keys - optional string key = 8; // Option for indicate property of payload + // name of the collection + string collection_name = 1; + // Wait until the changes have been applied? + optional bool wait = 2; + // New payload values + map<string, Value> payload = 3; + // List of points to modify, deprecated + reserved 4; + // Affected points + optional PointsSelector points_selector = 5; + // Write ordering guarantees + optional WriteOrdering ordering = 6; + // Option for custom sharding to specify used shard keys + optional ShardKeySelector shard_key_selector = 7; + // Option to indicate a property of the payload + optional string key = 8; + // Timeout for the request in seconds + optional uint64 timeout = 9; } message DeletePayloadPoints { - string collection_name = 1; // name of the collection - optional bool wait = 2; // Wait until the changes have been applied?
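`SetPayloadPoints` keeps its `key` option and gains a `timeout` in this sync. A sketch constructing the request with an explicitly built `Value` (the collection name, payload key, and values are hypothetical):

```rust
use std::collections::HashMap;

use qdrant_client::qdrant::{value::Kind, PointsSelector, SetPayloadPoints, Value};

// Set `color: "red"` under the payload property given by `key`.
fn set_color(selector: PointsSelector) -> SetPayloadPoints {
    let mut payload: HashMap<String, Value> = HashMap::new();
    payload.insert(
        "color".to_string(),
        Value { kind: Some(Kind::StringValue("red".to_string())) },
    );
    SetPayloadPoints {
        collection_name: "test".into(),
        payload,
        points_selector: Some(selector),
        key: Some("metadata".into()), // property of the payload to write under
        timeout: Some(10),            // new: per-request timeout in seconds
        ..Default::default()
    }
}
```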
- repeated string keys = 3; // List of keys to delete - reserved 4; // Affected points, deprecated - optional PointsSelector points_selector = 5; // Affected points - optional WriteOrdering ordering = 6; // Write ordering guarantees - optional ShardKeySelector shard_key_selector = 7; // Option for custom sharding to specify used shard keys + // name of the collection + string collection_name = 1; + // Wait until the changes have been applied? + optional bool wait = 2; + // List of keys to delete + repeated string keys = 3; + // Affected points, deprecated + reserved 4; + // Affected points + optional PointsSelector points_selector = 5; + // Write ordering guarantees + optional WriteOrdering ordering = 6; + // Option for custom sharding to specify used shard keys + optional ShardKeySelector shard_key_selector = 7; + // Timeout for the request in seconds + optional uint64 timeout = 8; } message ClearPayloadPoints { - string collection_name = 1; // name of the collection - optional bool wait = 2; // Wait until the changes have been applied? - PointsSelector points = 3; // Affected points - optional WriteOrdering ordering = 4; // Write ordering guarantees - optional ShardKeySelector shard_key_selector = 5; // Option for custom sharding to specify used shard keys + // name of the collection + string collection_name = 1; + // Wait until the changes have been applied? + optional bool wait = 2; + // Affected points + PointsSelector points = 3; + // Write ordering guarantees + optional WriteOrdering ordering = 4; + // Option for custom sharding to specify used shard keys + optional ShardKeySelector shard_key_selector = 5; + // Timeout for the request in seconds + optional uint64 timeout = 6; } enum FieldType { @@ -211,32 +323,49 @@ enum FieldType { } message CreateFieldIndexCollection { - string collection_name = 1; // name of the collection - optional bool wait = 2; // Wait until the changes have been applied? - string field_name = 3; // Field name to index - optional FieldType field_type = 4; // Field type. - optional PayloadIndexParams field_index_params = 5; // Payload index params. - optional WriteOrdering ordering = 6; // Write ordering guarantees + // name of the collection + string collection_name = 1; + // Wait until the changes have been applied? + optional bool wait = 2; + // Field name to index + string field_name = 3; + // Field type. + optional FieldType field_type = 4; + // Payload index params. + optional PayloadIndexParams field_index_params = 5; + // Write ordering guarantees + optional WriteOrdering ordering = 6; + // Timeout for the request in seconds + optional uint64 timeout = 7; } message DeleteFieldIndexCollection { - string collection_name = 1; // name of the collection - optional bool wait = 2; // Wait until the changes have been applied? - string field_name = 3; // Field name to delete - optional WriteOrdering ordering = 4; // Write ordering guarantees + // name of the collection + string collection_name = 1; + // Wait until the changes have been applied? 
+ optional bool wait = 2; + // Field name to delete + string field_name = 3; + // Write ordering guarantees + optional WriteOrdering ordering = 4; + // Timeout for the request in seconds + optional uint64 timeout = 5; } message PayloadIncludeSelector { - repeated string fields = 1; // List of payload keys to include into result + // List of payload keys to include into result + repeated string fields = 1; } message PayloadExcludeSelector { - repeated string fields = 1; // List of payload keys to exclude from the result + // List of payload keys to exclude from the result + repeated string fields = 1; } message WithPayloadSelector { oneof selector_options { - bool enable = 1; // If `true` - return all payload, if `false` - none + // If `true` - return all payload, if `false` - none + bool enable = 1; PayloadIncludeSelector include = 2; PayloadExcludeSelector exclude = 3; } @@ -265,138 +394,159 @@ message VectorsOutput { } message VectorsSelector { - repeated string names = 1; // List of vectors to include into result + // List of vectors to include into result + repeated string names = 1; } message WithVectorsSelector { oneof selector_options { - bool enable = 1; // If `true` - return all vectors, if `false` - none - VectorsSelector include = 2; // List of payload keys to include into result + // If `true` - return all vectors, if `false` - none + bool enable = 1; + // List of vectors to include into result + VectorsSelector include = 2; } } message QuantizationSearchParams { - /* - If set to true, search will ignore quantized vector data - */ + // If set to true, search will ignore quantized vector data optional bool ignore = 1; - /* - If true, use original vectors to re-score top-k results. If ignored, qdrant decides automatically does rescore enabled or not. - */ + // If true, use original vectors to re-score top-k results. + // If not set, Qdrant decides automatically whether rescoring is enabled. optional bool rescore = 2; - /* - Oversampling factor for quantization. - - Defines how many extra vectors should be pre-selected using quantized index, - and then re-scored using original vectors. - - For example, if `oversampling` is 2.4 and `limit` is 100, then 240 vectors will be pre-selected using quantized index, - and then top-100 will be returned after re-scoring. - */ + // Oversampling factor for quantization. + // + // Defines how many extra vectors should be pre-selected using quantized index, + // and then re-scored using original vectors. + // + // For example, if `oversampling` is 2.4 and `limit` is 100, + // then 240 vectors will be pre-selected using quantized index, + // and then top-100 will be returned after re-scoring. optional double oversampling = 3; } message AcornSearchParams { - /* - If true, then ACORN may be used for the HNSW search based on filters - selectivity. - - Improves search recall for searches with multiple low-selectivity - payload filters, at cost of performance. - */ + // If true, then ACORN may be used for the HNSW search based on filter + // selectivity. + // + // Improves search recall for searches with multiple low-selectivity + // payload filters, at the cost of performance. optional bool enable = 1; - /* - Maximum selectivity of filters to enable ACORN. - - If estimated filters selectivity is higher than this value, - ACORN will not be used. Selectivity is estimated as: - `estimated number of points satisfying the filters / total number of points`. - - 0.0 for never, 1.0 for always. Default is 0.4.
- */ + // Maximum selectivity of filters to enable ACORN. + // + // If estimated filters selectivity is higher than this value, + // ACORN will not be used. Selectivity is estimated as: + // `estimated number of points satisfying the filters / total number of points`. + // + // 0.0 for never, 1.0 for always. Default is 0.4. optional double max_selectivity = 2; } message SearchParams { - /* - Params relevant to HNSW index. Size of the beam in a beam-search. - Larger the value - more accurate the result, more time required for search. - */ + // Params relevant to HNSW index. Size of the beam in a beam-search. + // Larger the value - more accurate the result, more time required for search. optional uint64 hnsw_ef = 1; - /* - Search without approximation. If set to true, search may run long but with exact results. - */ + // Search without approximation. If set to true, search may run long but with exact results. optional bool exact = 2; - /* - If set to true, search will ignore quantized vector data - */ + // If set to true, search will ignore quantized vector data optional QuantizationSearchParams quantization = 3; - /* - If enabled, the engine will only perform search among indexed or small segments. - Using this option prevents slow searches in case of delayed index, but does not - guarantee that all uploaded vectors will be included in search results - */ + // If enabled, the engine will only perform search among indexed or small segments. + // Using this option prevents slow searches in case of delayed index, but does not + // guarantee that all uploaded vectors will be included in search results optional bool indexed_only = 4; - /* - ACORN search params - */ + // ACORN search params optional AcornSearchParams acorn = 5; } message SearchPoints { - string collection_name = 1; // name of the collection - repeated float vector = 2; // vector - Filter filter = 3; // Filter conditions - return only those points that satisfy the specified conditions - uint64 limit = 4; // Max number of result - reserved 5; // deprecated "with_vector" field - WithPayloadSelector with_payload = 6; // Options for specifying which payload to include or not - SearchParams params = 7; // Search config - optional float score_threshold = 8; // If provided - cut off results with worse scores - optional uint64 offset = 9; // Offset of the result - optional string vector_name = 10; // Which vector to use for search, if not specified - use default vector - optional WithVectorsSelector with_vectors = 11; // Options for specifying which vectors to include into response - optional ReadConsistency read_consistency = 12; // Options for specifying read consistency guarantees - optional uint64 timeout = 13; // If set, overrides global timeout setting for this request. Unit is seconds. 
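`AcornSearchParams` is wired into `SearchParams` above. A sketch of a fully populated value; the field shapes follow the proto, and all numbers are illustrative only:

```rust
use qdrant_client::qdrant::{AcornSearchParams, QuantizationSearchParams, SearchParams};

// Search params tuned for heavily filtered searches.
fn filtered_search_params() -> SearchParams {
    SearchParams {
        hnsw_ef: Some(128), // larger beam: better recall, slower search
        exact: Some(false), // keep using the approximate index
        quantization: Some(QuantizationSearchParams {
            ignore: Some(false),
            rescore: Some(true),
            oversampling: Some(2.4), // with limit=100, pre-select 240 candidates
        }),
        indexed_only: Some(true), // skip not-yet-indexed segments
        acorn: Some(AcornSearchParams {
            enable: Some(true),
            max_selectivity: Some(0.4), // skip ACORN for broader filters
        }),
    }
}
```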
- optional ShardKeySelector shard_key_selector = 14; // Specify in which shards to look for the points, if not specified - look in all shards + // name of the collection + string collection_name = 1; + // vector + repeated float vector = 2; + // Filter conditions - return only those points that satisfy the specified conditions + Filter filter = 3; + // Max number of result + uint64 limit = 4; + // deprecated "with_vector" field + reserved 5; + // Options for specifying which payload to include or not + WithPayloadSelector with_payload = 6; + // Search config + SearchParams params = 7; + // If provided - cut off results with worse scores + optional float score_threshold = 8; + // Offset of the result + optional uint64 offset = 9; + // Which vector to use for search, if not specified - use default vector + optional string vector_name = 10; + // Options for specifying which vectors to include into response + optional WithVectorsSelector with_vectors = 11; + // Options for specifying read consistency guarantees + optional ReadConsistency read_consistency = 12; + // If set, overrides global timeout setting for this request. Unit is seconds. + optional uint64 timeout = 13; + // Specify in which shards to look for the points, if not specified - look in all shards + optional ShardKeySelector shard_key_selector = 14; optional SparseIndices sparse_indices = 15; } message SearchBatchPoints { - string collection_name = 1; // Name of the collection + // Name of the collection + string collection_name = 1; repeated SearchPoints search_points = 2; - optional ReadConsistency read_consistency = 3; // Options for specifying read consistency guarantees - optional uint64 timeout = 4; // If set, overrides global timeout setting for this request. Unit is seconds. + // Options for specifying read consistency guarantees + optional ReadConsistency read_consistency = 3; + // If set, overrides global timeout setting for this request. Unit is seconds. + optional uint64 timeout = 4; } message WithLookup { - string collection = 1; // Name of the collection to use for points lookup - optional WithPayloadSelector with_payload = 2; // Options for specifying which payload to include (or not) - optional WithVectorsSelector with_vectors = 3; // Options for specifying which vectors to include (or not) + // Name of the collection to use for points lookup + string collection = 1; + // Options for specifying which payload to include (or not) + optional WithPayloadSelector with_payload = 2; + // Options for specifying which vectors to include (or not) + optional WithVectorsSelector with_vectors = 3; } - message SearchPointGroups { - string collection_name = 1; // Name of the collection - repeated float vector = 2; // Vector to compare against - Filter filter = 3; // Filter conditions - return only those points that satisfy the specified conditions - uint32 limit = 4; // Max number of result - WithPayloadSelector with_payload = 5; // Options for specifying which payload to include or not - SearchParams params = 6; // Search config - optional float score_threshold = 7; // If provided - cut off results with worse scores - optional string vector_name = 8; // Which vector to use for search, if not specified - use default vector - optional WithVectorsSelector with_vectors = 9; // Options for specifying which vectors to include into response - string group_by = 10; // Payload field to group by, must be a string or number field. If there are multiple values for the field, all of them will be used. One point can be in multiple groups. 
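`group_by` turns a search into one result set per distinct payload value, and `with_lookup` resolves the group ids against another collection. A minimal sketch of `SearchPointGroups`; the collection and field names are hypothetical:

```rust
use qdrant_client::qdrant::{SearchPointGroups, WithLookup};

// Up to 5 groups of up to 3 chunks each, one group per `document_id`.
fn grouped_search() -> SearchPointGroups {
    SearchPointGroups {
        collection_name: "chunks".into(),
        vector: vec![0.1; 10],
        group_by: "document_id".into(), // string or number payload field
        limit: 5,                       // max number of groups
        group_size: 3,                  // max points per group
        with_lookup: Some(WithLookup {
            collection: "documents".into(), // fetch the group id records here
            ..Default::default()
        }),
        ..Default::default()
    }
}
```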
- uint32 group_size = 11; // Maximum amount of points to return per group - optional ReadConsistency read_consistency = 12; // Options for specifying read consistency guarantees - optional WithLookup with_lookup = 13; // Options for specifying how to use the group id to lookup points in another collection - optional uint64 timeout = 14; // If set, overrides global timeout setting for this request. Unit is seconds. - optional ShardKeySelector shard_key_selector = 15; // Specify in which shards to look for the points, if not specified - look in all shards + // Name of the collection + string collection_name = 1; + // Vector to compare against + repeated float vector = 2; + // Filter conditions - return only those points that satisfy the specified conditions + Filter filter = 3; + // Max number of result + uint32 limit = 4; + // Options for specifying which payload to include or not + WithPayloadSelector with_payload = 5; + // Search config + SearchParams params = 6; + // If provided - cut off results with worse scores + optional float score_threshold = 7; + // Which vector to use for search, if not specified - use default vector + optional string vector_name = 8; + // Options for specifying which vectors to include into response + optional WithVectorsSelector with_vectors = 9; + // Payload field to group by, must be a string or number field. + // If there are multiple values for the field, all of them will be used. + // One point can be in multiple groups. + string group_by = 10; + // Maximum amount of points to return per group + uint32 group_size = 11; + // Options for specifying read consistency guarantees + optional ReadConsistency read_consistency = 12; + // Options for specifying how to use the group id to lookup points in another collection + optional WithLookup with_lookup = 13; + // If set, overrides global timeout setting for this request. Unit is seconds. + optional uint64 timeout = 14; + // Specify in which shards to look for the points, if not specified - look in all shards + optional ShardKeySelector shard_key_selector = 15; optional SparseIndices sparse_indices = 16; } @@ -415,23 +565,36 @@ message StartFrom { } message OrderBy { - string key = 1; // Payload key to order by - optional Direction direction = 2; // Ascending or descending order - optional StartFrom start_from = 3; // Start from this value + // Payload key to order by + string key = 1; + // Ascending or descending order + optional Direction direction = 2; + // Start from this value + optional StartFrom start_from = 3; } message ScrollPoints { string collection_name = 1; - Filter filter = 2; // Filter conditions - return only those points that satisfy the specified conditions - optional PointId offset = 3; // Start with this ID - optional uint32 limit = 4; // Max number of result - reserved 5; // deprecated "with_vector" field - WithPayloadSelector with_payload = 6; // Options for specifying which payload to include or not - optional WithVectorsSelector with_vectors = 7; // Options for specifying which vectors to include into response - optional ReadConsistency read_consistency = 8; // Options for specifying read consistency guarantees - optional ShardKeySelector shard_key_selector = 9; // Specify in which shards to look for the points, if not specified - look in all shards - optional OrderBy order_by = 10; // Order the records by a payload field - optional uint64 timeout = 11; // If set, overrides global timeout setting for this request. Unit is seconds. 
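`ScrollPoints` supports ordering by a payload field via `order_by` instead of the default ID order. A sketch, assuming the usual prost codegen (the payload key is hypothetical, and ordering generally requires the field to be payload-indexed):

```rust
use qdrant_client::qdrant::{Direction, OrderBy, ScrollPoints};

// Scroll newest-first by a numeric payload field instead of by point ID.
fn scroll_by_timestamp() -> ScrollPoints {
    ScrollPoints {
        collection_name: "test".into(),
        limit: Some(100),
        order_by: Some(OrderBy {
            key: "timestamp".into(),                 // payload field to order by
            direction: Some(Direction::Desc as i32), // descending: newest first
            start_from: None,                        // begin at the extreme value
        }),
        timeout: Some(5), // per-request timeout in seconds
        ..Default::default()
    }
}
```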
+ // Filter conditions - return only those points that satisfy the specified conditions + Filter filter = 2; + // Start with this ID + optional PointId offset = 3; + // Max number of result + optional uint32 limit = 4; + // deprecated "with_vector" field + reserved 5; + // Options for specifying which payload to include or not + WithPayloadSelector with_payload = 6; + // Options for specifying which vectors to include into response + optional WithVectorsSelector with_vectors = 7; + // Options for specifying read consistency guarantees + optional ReadConsistency read_consistency = 8; + // Specify in which shards to look for the points, if not specified - look in all shards + optional ShardKeySelector shard_key_selector = 9; + // Order the records by a payload field + optional OrderBy order_by = 10; + // If set, overrides global timeout setting for this request. Unit is seconds. + optional uint64 timeout = 11; } // How to use positive and negative vectors to find the results, default is `AverageVector`. @@ -452,60 +615,106 @@ enum RecommendStrategy { message LookupLocation { string collection_name = 1; - optional string vector_name = 2; // Which vector to use for search, if not specified - use default vector - optional ShardKeySelector shard_key_selector = 3; // Specify in which shards to look for the points, if not specified - look in all shards + // Which vector to use for search, if not specified - use default vector + optional string vector_name = 2; + // Specify in which shards to look for the points, if not specified - look in all shards + optional ShardKeySelector shard_key_selector = 3; } message RecommendPoints { - string collection_name = 1; // name of the collection - repeated PointId positive = 2; // Look for vectors closest to the vectors from these points - repeated PointId negative = 3; // Try to avoid vectors like the vector from these points - Filter filter = 4; // Filter conditions - return only those points that satisfy the specified conditions - uint64 limit = 5; // Max number of result - reserved 6; // deprecated "with_vector" field - WithPayloadSelector with_payload = 7; // Options for specifying which payload to include or not - SearchParams params = 8; // Search config - optional float score_threshold = 9; // If provided - cut off results with worse scores - optional uint64 offset = 10; // Offset of the result - optional string using = 11; // Define which vector to use for recommendation, if not specified - default vector - optional WithVectorsSelector with_vectors = 12; // Options for specifying which vectors to include into response - optional LookupLocation lookup_from = 13; // Name of the collection to use for points lookup, if not specified - use current collection - optional ReadConsistency read_consistency = 14; // Options for specifying read consistency guarantees - optional RecommendStrategy strategy = 16; // How to use the example vectors to find the results - repeated Vector positive_vectors = 17; // Look for vectors closest to those - repeated Vector negative_vectors = 18; // Try to avoid vectors like this - optional uint64 timeout = 19; // If set, overrides global timeout setting for this request. Unit is seconds. 
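For `RecommendPoints`, positives and negatives can be given as point ids (shown here) or as raw vectors via `positive_vectors`/`negative_vectors`. A sketch relying on the client's existing `Into<PointId>` conversions; the ids and collection name are made up:

```rust
use qdrant_client::qdrant::{RecommendPoints, RecommendStrategy};

// Recommend points similar to point 42 while steering away from point 7.
fn recommend_request() -> RecommendPoints {
    RecommendPoints {
        collection_name: "test".into(),
        positive: vec![42.into()],
        negative: vec![7.into()],
        limit: 10,
        strategy: Some(RecommendStrategy::BestScore as i32),
        timeout: Some(5), // seconds
        ..Default::default()
    }
}
```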
- optional ShardKeySelector shard_key_selector = 20; // Specify in which shards to look for the points, if not specified - look in all shards + // name of the collection + string collection_name = 1; + // Look for vectors closest to the vectors from these points + repeated PointId positive = 2; + // Try to avoid vectors like the vector from these points + repeated PointId negative = 3; + // Filter conditions - return only those points that satisfy the specified conditions + Filter filter = 4; + // Max number of result + uint64 limit = 5; + // deprecated "with_vector" field + reserved 6; + // Options for specifying which payload to include or not + WithPayloadSelector with_payload = 7; + // Search config + SearchParams params = 8; + // If provided - cut off results with worse scores + optional float score_threshold = 9; + // Offset of the result + optional uint64 offset = 10; + // Define which vector to use for recommendation, if not specified - default vector + optional string using = 11; + // Options for specifying which vectors to include into response + optional WithVectorsSelector with_vectors = 12; + // Name of the collection to use for points lookup, if not specified - use current collection + optional LookupLocation lookup_from = 13; + // Options for specifying read consistency guarantees + optional ReadConsistency read_consistency = 14; + // How to use the example vectors to find the results + optional RecommendStrategy strategy = 16; + // Look for vectors closest to those + repeated Vector positive_vectors = 17; + // Try to avoid vectors like this + repeated Vector negative_vectors = 18; + // If set, overrides global timeout setting for this request. Unit is seconds. + optional uint64 timeout = 19; + // Specify in which shards to look for the points, if not specified - look in all shards + optional ShardKeySelector shard_key_selector = 20; } message RecommendBatchPoints { - string collection_name = 1; // Name of the collection + // Name of the collection + string collection_name = 1; repeated RecommendPoints recommend_points = 2; - optional ReadConsistency read_consistency = 3; // Options for specifying read consistency guarantees - optional uint64 timeout = 4; // If set, overrides global timeout setting for this request. Unit is seconds. + // Options for specifying read consistency guarantees + optional ReadConsistency read_consistency = 3; + // If set, overrides global timeout setting for this request. Unit is seconds. 
+ optional uint64 timeout = 4; } message RecommendPointGroups { - string collection_name = 1; // Name of the collection - repeated PointId positive = 2; // Look for vectors closest to the vectors from these points - repeated PointId negative = 3; // Try to avoid vectors like the vector from these points - Filter filter = 4; // Filter conditions - return only those points that satisfy the specified conditions - uint32 limit = 5; // Max number of groups in result - WithPayloadSelector with_payload = 6; // Options for specifying which payload to include or not - SearchParams params = 7; // Search config - optional float score_threshold = 8; // If provided - cut off results with worse scores - optional string using = 9; // Define which vector to use for recommendation, if not specified - default vector - optional WithVectorsSelector with_vectors = 10; // Options for specifying which vectors to include into response - optional LookupLocation lookup_from = 11; // Name of the collection to use for points lookup, if not specified - use current collection - string group_by = 12; // Payload field to group by, must be a string or number field. If there are multiple values for the field, all of them will be used. One point can be in multiple groups. - uint32 group_size = 13; // Maximum amount of points to return per group - optional ReadConsistency read_consistency = 14; // Options for specifying read consistency guarantees - optional WithLookup with_lookup = 15; // Options for specifying how to use the group id to lookup points in another collection - optional RecommendStrategy strategy = 17; // How to use the example vectors to find the results - repeated Vector positive_vectors = 18; // Look for vectors closest to those - repeated Vector negative_vectors = 19; // Try to avoid vectors like this - optional uint64 timeout = 20; // If set, overrides global timeout setting for this request. Unit is seconds. - optional ShardKeySelector shard_key_selector = 21; // Specify in which shards to look for the points, if not specified - look in all shards + // Name of the collection + string collection_name = 1; + // Look for vectors closest to the vectors from these points + repeated PointId positive = 2; + // Try to avoid vectors like the vector from these points + repeated PointId negative = 3; + // Filter conditions - return only those points that satisfy the specified conditions + Filter filter = 4; + // Max number of groups in result + uint32 limit = 5; + // Options for specifying which payload to include or not + WithPayloadSelector with_payload = 6; + // Search config + SearchParams params = 7; + // If provided - cut off results with worse scores + optional float score_threshold = 8; + // Define which vector to use for recommendation, if not specified - default vector + optional string using = 9; + // Options for specifying which vectors to include into response + optional WithVectorsSelector with_vectors = 10; + // Name of the collection to use for points lookup, if not specified - use current collection + optional LookupLocation lookup_from = 11; + // Payload field to group by, must be a string or number field. + // If there are multiple values for the field, all of them will be used. + // One point can be in multiple groups. 
+ string group_by = 12; + // Maximum amount of points to return per group + uint32 group_size = 13; + // Options for specifying read consistency guarantees + optional ReadConsistency read_consistency = 14; + // Options for specifying how to use the group id to lookup points in another collection + optional WithLookup with_lookup = 15; + // How to use the example vectors to find the results + optional RecommendStrategy strategy = 17; + // Look for vectors closest to those + repeated Vector positive_vectors = 18; + // Try to avoid vectors like this + repeated Vector negative_vectors = 19; + // If set, overrides global timeout setting for this request. Unit is seconds. + optional uint64 timeout = 20; + // Specify in which shards to look for the points, if not specified - look in all shards + optional ShardKeySelector shard_key_selector = 21; } message TargetVector { @@ -529,61 +738,121 @@ message ContextExamplePair { } message DiscoverPoints { - string collection_name = 1; // name of the collection - TargetVector target = 2; // Use this as the primary search objective - repeated ContextExamplePair context = 3; // Search will be constrained by these pairs of examples - Filter filter = 4; // Filter conditions - return only those points that satisfy the specified conditions - uint64 limit = 5; // Max number of result - WithPayloadSelector with_payload = 6; // Options for specifying which payload to include or not - SearchParams params = 7; // Search config - optional uint64 offset = 8; // Offset of the result - optional string using = 9; // Define which vector to use for recommendation, if not specified - default vector - optional WithVectorsSelector with_vectors = 10; // Options for specifying which vectors to include into response - optional LookupLocation lookup_from = 11; // Name of the collection to use for points lookup, if not specified - use current collection - optional ReadConsistency read_consistency = 12; // Options for specifying read consistency guarantees - optional uint64 timeout = 13; // If set, overrides global timeout setting for this request. Unit is seconds. - optional ShardKeySelector shard_key_selector = 14; // Specify in which shards to look for the points, if not specified - look in all shards + // name of the collection + string collection_name = 1; + // Use this as the primary search objective + TargetVector target = 2; + // Search will be constrained by these pairs of examples + repeated ContextExamplePair context = 3; + // Filter conditions - return only those points that satisfy the specified conditions + Filter filter = 4; + // Max number of result + uint64 limit = 5; + // Options for specifying which payload to include or not + WithPayloadSelector with_payload = 6; + // Search config + SearchParams params = 7; + // Offset of the result + optional uint64 offset = 8; + // Define which vector to use for recommendation, if not specified - default vector + optional string using = 9; + // Options for specifying which vectors to include into response + optional WithVectorsSelector with_vectors = 10; + // Name of the collection to use for points lookup, if not specified - use current collection + optional LookupLocation lookup_from = 11; + // Options for specifying read consistency guarantees + optional ReadConsistency read_consistency = 12; + // If set, overrides global timeout setting for this request. Unit is seconds. 
+ optional uint64 timeout = 13; + // Specify in which shards to look for the points, if not specified - look in all shards + optional ShardKeySelector shard_key_selector = 14; } message DiscoverBatchPoints { - string collection_name = 1; // Name of the collection + // Name of the collection + string collection_name = 1; repeated DiscoverPoints discover_points = 2; - optional ReadConsistency read_consistency = 3; // Options for specifying read consistency guarantees - optional uint64 timeout = 4; // If set, overrides global timeout setting for this request. Unit is seconds. + // Options for specifying read consistency guarantees + optional ReadConsistency read_consistency = 3; + // If set, overrides global timeout setting for this request. Unit is seconds. + optional uint64 timeout = 4; } message CountPoints { - string collection_name = 1; // Name of the collection - Filter filter = 2; // Filter conditions - return only those points that satisfy the specified conditions - optional bool exact = 3; // If `true` - return exact count, if `false` - return approximate count - optional ReadConsistency read_consistency = 4; // Options for specifying read consistency guarantees - optional ShardKeySelector shard_key_selector = 5; // Specify in which shards to look for the points, if not specified - look in all shards - optional uint64 timeout = 6; // If set, overrides global timeout setting for this request. Unit is seconds. + // Name of the collection + string collection_name = 1; + // Filter conditions - return only those points that satisfy the specified conditions + Filter filter = 2; + // If `true` - return exact count, if `false` - return approximate count + optional bool exact = 3; + // Options for specifying read consistency guarantees + optional ReadConsistency read_consistency = 4; + // Specify in which shards to look for the points, if not specified - look in all shards + optional ShardKeySelector shard_key_selector = 5; + // If set, overrides global timeout setting for this request. Unit is seconds. 
+ optional uint64 timeout = 6; } message RecommendInput { - repeated VectorInput positive = 1; // Look for vectors closest to the vectors from these points - repeated VectorInput negative = 2; // Try to avoid vectors like the vector from these points - optional RecommendStrategy strategy = 3; // How to use the provided vectors to find the results + // Look for vectors closest to the vectors from these points + repeated VectorInput positive = 1; + // Try to avoid vectors like the vector from these points + repeated VectorInput negative = 2; + // How to use the provided vectors to find the results + optional RecommendStrategy strategy = 3; } message ContextInputPair { - VectorInput positive = 1; // A positive vector - VectorInput negative = 2; // Repel from this vector + // A positive vector + VectorInput positive = 1; + // Repel from this vector + VectorInput negative = 2; } message DiscoverInput { - VectorInput target = 1; // Use this as the primary search objective - ContextInput context = 2; // Search space will be constrained by these pairs of vectors + // Use this as the primary search objective + VectorInput target = 1; + // Search space will be constrained by these pairs of vectors + ContextInput context = 2; } message ContextInput { - repeated ContextInputPair pairs = 1; // Search space will be constrained by these pairs of vectors + // Search space will be constrained by these pairs of vectors + repeated ContextInputPair pairs = 1; +} + +message RelevanceFeedbackInput { + // The original query vector + VectorInput target = 1; + // Previous results scored by the feedback provider. + repeated FeedbackItem feedback = 2; + // Formula and trained coefficients to use. + FeedbackStrategy strategy = 3; +} + +message FeedbackItem { + // The id or vector from the original model + VectorInput example = 1; + // Score for this vector as determined by the feedback provider + float score = 2; +} + +message FeedbackStrategy { + oneof variant { + // a * score + sim(confidence^b * c * delta) + NaiveFeedbackStrategy naive = 1; + } +} + +message NaiveFeedbackStrategy { + float a = 1; + float b = 2; + float c = 3; } enum Fusion { - RRF = 0; // Reciprocal Rank Fusion (with default parameters) - DBSF = 1; // Distribution-Based Score Fusion + // Reciprocal Rank Fusion (with default parameters) + RRF = 0; + // Distribution-Based Score Fusion + DBSF = 1; } // Sample points from the collection // // * `random` - Random sampling enum Sample { - Random = 0; + Random = 0; } message Formula { - Expression expression = 1; - map<string, Value> defaults = 2; + Expression expression = 1; + map<string, Value> defaults = 2; } message Expression { - oneof variant { - float constant = 1; - string variable = 2; // Payload key or reference to score. - Condition condition = 3; // Payload condition.
If true, becomes 1.0; otherwise 0.0 - GeoDistance geo_distance = 4; // Geographic distance in meters - string datetime = 5; // Date-time constant - string datetime_key = 6; // Payload key with date-time values - MultExpression mult = 7; // Multiply - SumExpression sum = 8; // Sum - DivExpression div = 9; // Divide - Expression neg = 10; // Negate - Expression abs = 11; // Absolute value - Expression sqrt = 12; // Square root - PowExpression pow = 13; // Power - Expression exp = 14; // Exponential - Expression log10 = 15; // Logarithm - Expression ln = 16; // Natural logarithm - DecayParamsExpression exp_decay = 17; // Exponential decay - DecayParamsExpression gauss_decay = 18; // Gaussian decay - DecayParamsExpression lin_decay = 19; // Linear decay - } + oneof variant { + float constant = 1; + // Payload key or reference to score. + string variable = 2; + // Payload condition. If true, becomes 1.0; otherwise 0.0 + Condition condition = 3; + // Geographic distance in meters + GeoDistance geo_distance = 4; + // Date-time constant + string datetime = 5; + // Payload key with date-time values + string datetime_key = 6; + // Multiply + MultExpression mult = 7; + // Sum + SumExpression sum = 8; + // Divide + DivExpression div = 9; + // Negate + Expression neg = 10; + // Absolute value + Expression abs = 11; + // Square root + Expression sqrt = 12; + // Power + PowExpression pow = 13; + // Exponential + Expression exp = 14; + // Logarithm + Expression log10 = 15; + // Natural logarithm + Expression ln = 16; + // Exponential decay + DecayParamsExpression exp_decay = 17; + // Gaussian decay + DecayParamsExpression gauss_decay = 18; + // Linear decay + DecayParamsExpression lin_decay = 19; + } } message GeoDistance { - GeoPoint origin = 1; - string to = 2; + GeoPoint origin = 1; + string to = 2; } message MultExpression { - repeated Expression mult = 1; + repeated Expression mult = 1; } message SumExpression { - repeated Expression sum = 1; + repeated Expression sum = 1; } message DivExpression { - Expression left = 1; - Expression right = 2; - optional float by_zero_default = 3; + Expression left = 1; + Expression right = 2; + optional float by_zero_default = 3; } message PowExpression { - Expression base = 1; - Expression exponent = 2; + Expression base = 1; + Expression exponent = 2; } message DecayParamsExpression { - // The variable to decay - Expression x = 1; - // The target value to start decaying from. Defaults to 0. - optional Expression target = 2; - // The scale factor of the decay, in terms of `x`. Defaults to 1.0. Must be a non-zero positive number. - optional float scale = 3; - // The midpoint of the decay. Should be between 0 and 1. Defaults to 0.5. Output will be this value when `|x - target| == scale`. - optional float midpoint = 4; + // The variable to decay + Expression x = 1; + // The target value to start decaying from. Defaults to 0. + optional Expression target = 2; + // The scale factor of the decay, in terms of `x`. + // Defaults to 1.0. Must be a non-zero positive number. + optional float scale = 3; + // The midpoint of the decay. + // Should be between 0 and 1. Defaults to 0.5. + // Output will be this value when `|x - target| == scale`. + optional float midpoint = 4; } message NearestInputWithMmr { - // The vector to search for nearest neighbors. - VectorInput nearest = 1; + // The vector to search for nearest neighbors. 
+ VectorInput nearest = 1; - // Perform MMR (Maximal Marginal Relevance) reranking after search, - // using the same vector in this query to calculate relevance. - Mmr mmr = 2; + // Perform MMR (Maximal Marginal Relevance) reranking after search, + // using the same vector in this query to calculate relevance. + Mmr mmr = 2; } // Maximal Marginal Relevance (MMR) algorithm for re-ranking the points. message Mmr { - // Tunable parameter for the MMR algorithm. - // Determines the balance between diversity and relevance. - // - // A higher value favors diversity (dissimilarity to selected results), - // while a lower value favors relevance (similarity to the query vector). - // - // Must be in the range [0, 1]. - // Default value is 0.5. - optional float diversity = 2; - - // The maximum number of candidates to consider for re-ranking. - // - // If not specified, the `limit` value is used. - optional uint32 candidates_limit = 3; + // Tunable parameter for the MMR algorithm. + // Determines the balance between diversity and relevance. + // + // A higher value favors diversity (dissimilarity to selected results), + // while a lower value favors relevance (similarity to the query vector). + // + // Must be in the range [0, 1]. + // Default value is 0.5. + optional float diversity = 2; + + // The maximum number of candidates to consider for re-ranking. + // + // If not specified, the `limit` value is used. + optional uint32 candidates_limit = 3; } // Parameterized reciprocal rank fusion message Rrf { - optional uint32 k = 1; // K parameter for reciprocal rank fusion + // K parameter for reciprocal rank fusion + optional uint32 k = 1; + + // Weights for each prefetch source. + // Higher weight gives more influence on the final ranking. + // If not specified, all prefetches are weighted equally. + // The number of weights should match the number of prefetches. + repeated float weights = 2; } message Query { oneof variant { - VectorInput nearest = 1; // Find the nearest neighbors to this vector. - RecommendInput recommend = 2; // Use multiple positive and negative vectors to find the results. - DiscoverInput discover = 3; // Search for nearest points, but constrain the search space with context - ContextInput context = 4; // Return points that live in positive areas. - OrderBy order_by = 5; // Order the points by a payload field. - Fusion fusion = 6; // Fuse the results of multiple prefetches. - Sample sample = 7; // Sample points from the collection. - Formula formula = 8; // Score boosting via an arbitrary formula - NearestInputWithMmr nearest_with_mmr = 9; // Search nearest neighbors, but re-rank based on the Maximal Marginal Relevance algorithm. - Rrf rrf = 10; // Parameterized reciprocal rank fusion + // Find the nearest neighbors to this vector. + VectorInput nearest = 1; + // Use multiple positive and negative vectors to find the results. + RecommendInput recommend = 2; + // Search for nearest points, but constrain the search space with context + DiscoverInput discover = 3; + // Return points that live in positive areas. + ContextInput context = 4; + // Order the points by a payload field. + OrderBy order_by = 5; + // Fuse the results of multiple prefetches. + Fusion fusion = 6; + // Sample points from the collection. + Sample sample = 7; + // Score boosting via an arbitrary formula + Formula formula = 8; + // Search nearest neighbors, but re-rank based on the Maximal Marginal Relevance algorithm. 
+ NearestInputWithMmr nearest_with_mmr = 9; + // Parameterized reciprocal rank fusion + Rrf rrf = 10; + // Search with feedback from some oracle. + RelevanceFeedbackInput relevance_feedback = 11; } } message PrefetchQuery { - repeated PrefetchQuery prefetch = 1; // Sub-requests to perform first. If present, the query will be performed on the results of the prefetches. - optional Query query = 2; // Query to perform. If missing, returns points ordered by their IDs. - optional string using = 3; // Define which vector to use for querying. If missing, the default vector is is used. - optional Filter filter = 4; // Filter conditions - return only those points that satisfy the specified conditions. - optional SearchParams params = 5; // Search params for when there is no prefetch. - optional float score_threshold = 6; // Return points with scores better than this threshold. - optional uint64 limit = 7; // Max number of points. Default is 10 - optional LookupLocation lookup_from = 8; // The location to use for IDs lookup, if not specified - use the current collection and the 'using' vector + // Sub-requests to perform first. + // If present, the query will be performed on the results of the prefetches. + repeated PrefetchQuery prefetch = 1; + // Query to perform. + // If missing, returns points ordered by their IDs. + optional Query query = 2; + // Define which vector to use for querying. + // If missing, the default vector is used. + optional string using = 3; + // Filter conditions - return only those points that satisfy the specified conditions. + optional Filter filter = 4; + // Search params for when there is no prefetch. + optional SearchParams params = 5; + // Return points with scores better than this threshold. + optional float score_threshold = 6; + // Max number of points. Default is 10 + optional uint64 limit = 7; + // The location to use for IDs lookup. + // If not specified - use the current collection and the 'using' vector. + optional LookupLocation lookup_from = 8; } message QueryPoints { - string collection_name = 1; // Name of the collection - repeated PrefetchQuery prefetch = 2; // Sub-requests to perform first. If present, the query will be performed on the results of the prefetches. - optional Query query = 3; // Query to perform. If missing, returns points ordered by their IDs. - optional string using = 4; // Define which vector to use for querying. If missing, the default vector is used. - optional Filter filter = 5; // Filter conditions - return only those points that satisfy the specified conditions. - optional SearchParams params = 6; // Search params for when there is no prefetch. - optional float score_threshold = 7; // Return points with scores better than this threshold. - optional uint64 limit = 8; // Max number of points. Default is 10. - optional uint64 offset = 9; // Offset of the result. Skip this many points. Default is 0. - optional WithVectorsSelector with_vectors = 10; // Options for specifying which vectors to include into the response. - optional WithPayloadSelector with_payload = 11; // Options for specifying which payload to include or not. - optional ReadConsistency read_consistency = 12; // Options for specifying read consistency guarantees. - optional ShardKeySelector shard_key_selector = 13; // Specify in which shards to look for the points, if not specified - look in all shards. 
- optional LookupLocation lookup_from = 14; // The location to use for IDs lookup, if not specified - use the current collection and the 'using' vector - optional uint64 timeout = 15; // If set, overrides global timeout setting for this request. Unit is seconds. + // Name of the collection + string collection_name = 1; + // Sub-requests to perform first. + // If present, the query will be performed on the results of the prefetches. + repeated PrefetchQuery prefetch = 2; + // Query to perform. If missing, returns points ordered by their IDs. + optional Query query = 3; + // Define which vector to use for querying. + // If missing, the default vector is used. + optional string using = 4; + // Filter conditions - return only those points that satisfy the specified conditions. + optional Filter filter = 5; + // Search params for when there is no prefetch. + optional SearchParams params = 6; + // Return points with scores better than this threshold. + optional float score_threshold = 7; + // Max number of points. Default is 10. + optional uint64 limit = 8; + // Offset of the result. Skip this many points. Default is 0. + optional uint64 offset = 9; + // Options for specifying which vectors to include into the response. + optional WithVectorsSelector with_vectors = 10; + // Options for specifying which payload to include or not. + optional WithPayloadSelector with_payload = 11; + // Options for specifying read consistency guarantees. + optional ReadConsistency read_consistency = 12; + // Specify in which shards to look for the points. + // If not specified - look in all shards. + optional ShardKeySelector shard_key_selector = 13; + // The location to use for IDs lookup. + // If not specified - use the current collection and the 'using' vector. + optional LookupLocation lookup_from = 14; + // If set, overrides global timeout setting for this request. Unit is seconds. + optional uint64 timeout = 15; } message QueryBatchPoints { string collection_name = 1; repeated QueryPoints query_points = 2; - optional ReadConsistency read_consistency = 3; // Options for specifying read consistency guarantees - optional uint64 timeout = 4; // If set, overrides global timeout setting for this request. Unit is seconds. + // Options for specifying read consistency guarantees + optional ReadConsistency read_consistency = 3; + // If set, overrides global timeout setting for this request. Unit is seconds. + optional uint64 timeout = 4; } message QueryPointGroups { - string collection_name = 1; // Name of the collection - repeated PrefetchQuery prefetch = 2; // Sub-requests to perform first. If present, the query will be performed on the results of the prefetches. - optional Query query = 3; // Query to perform. If missing, returns points ordered by their IDs. - optional string using = 4; // Define which vector to use for querying. If missing, the default vector is used. - optional Filter filter = 5; // Filter conditions - return only those points that satisfy the specified conditions. - optional SearchParams params = 6; // Search params for when there is no prefetch. - optional float score_threshold = 7; // Return points with scores better than this threshold. 
- WithPayloadSelector with_payload = 8; // Options for specifying which payload to include or not - optional WithVectorsSelector with_vectors = 9; // Options for specifying which vectors to include into response - optional LookupLocation lookup_from = 10; // The location to use for IDs lookup, if not specified - use the current collection and the 'using' vector - optional uint64 limit = 11; // Max number of points. Default is 3. - optional uint64 group_size = 12; // Maximum amount of points to return per group. Default to 10. - string group_by = 13; // Payload field to group by, must be a string or number field. If there are multiple values for the field, all of them will be used. One point can be in multiple groups. - optional ReadConsistency read_consistency = 14; // Options for specifying read consistency guarantees - optional WithLookup with_lookup = 15; // Options for specifying how to use the group id to lookup points in another collection - optional uint64 timeout = 16; // If set, overrides global timeout setting for this request. Unit is seconds. - optional ShardKeySelector shard_key_selector = 17; // Specify in which shards to look for the points, if not specified - look in all shards + // Name of the collection + string collection_name = 1; + // Sub-requests to perform first. + // If present, the query will be performed on the results of the prefetches. + repeated PrefetchQuery prefetch = 2; + // Query to perform. If missing, returns points ordered by their IDs. + optional Query query = 3; + // Define which vector to use for querying. + // If missing, the default vector is used. + optional string using = 4; + // Filter conditions - return only those points that satisfy the specified conditions. + optional Filter filter = 5; + // Search params for when there is no prefetch. + optional SearchParams params = 6; + // Return points with scores better than this threshold. + optional float score_threshold = 7; + // Options for specifying which payload to include or not + WithPayloadSelector with_payload = 8; + // Options for specifying which vectors to include into response + optional WithVectorsSelector with_vectors = 9; + // The location to use for IDs lookup. + // If not specified - use the current collection and the 'using' vector. + optional LookupLocation lookup_from = 10; + // Max number of points. Default is 3. + optional uint64 limit = 11; + // Maximum amount of points to return per group. Defaults to 10. + optional uint64 group_size = 12; + // Payload field to group by, must be a string or number field. + // If there are multiple values for the field, all of them will be used. + // One point can be in multiple groups. + string group_by = 13; + // Options for specifying read consistency guarantees + optional ReadConsistency read_consistency = 14; + // Options for specifying how to use the group id to lookup points in another collection + optional WithLookup with_lookup = 15; + // If set, overrides global timeout setting for this request. Unit is seconds. + optional uint64 timeout = 16; + // Specify in which shards to look for the points, if not specified - look in all shards + optional ShardKeySelector shard_key_selector = 17; } message FacetCounts { - string collection_name = 1; // Name of the collection - string key = 2; // Payload key of the facet - optional Filter filter = 3; // Filter conditions - return only those points that satisfy the specified conditions. - optional uint64 limit = 4; // Max number of facets. Default is 10. 
- optional bool exact = 5; // If true, return exact counts, slower but useful for debugging purposes. Default is false. - optional uint64 timeout = 6; // If set, overrides global timeout setting for this request. Unit is seconds. - optional ReadConsistency read_consistency = 7; // Options for specifying read consistency guarantees - optional ShardKeySelector shard_key_selector = 8; // Specify in which shards to look for the points, if not specified - look in all shards + // Name of the collection + string collection_name = 1; + // Payload key of the facet + string key = 2; + // Filter conditions - return only those points that satisfy the specified conditions. + optional Filter filter = 3; + // Max number of facets. Default is 10. + optional uint64 limit = 4; + // If true, return exact counts, slower but useful for debugging purposes. Default is false. + optional bool exact = 5; + // If set, overrides global timeout setting for this request. Unit is seconds. + optional uint64 timeout = 6; + // Options for specifying read consistency guarantees + optional ReadConsistency read_consistency = 7; + // Specify in which shards to look for the points, if not specified - look in all shards + optional ShardKeySelector shard_key_selector = 8; } message FacetValue { - oneof variant { - string string_value = 1; // String value from the facet - int64 integer_value = 2; // Integer value from the facet - bool bool_value = 3; // Boolean value from the facet - } + oneof variant { + // String value from the facet + string string_value = 1; + // Integer value from the facet + int64 integer_value = 2; + // Boolean value from the facet + bool bool_value = 3; + } } message FacetHit { - FacetValue value = 1; // Value from the facet - uint64 count = 2; // Number of points with this value + // Value from the facet + FacetValue value = 1; + // Number of points with this value + uint64 count = 2; } message SearchMatrixPoints { - string collection_name = 1; // Name of the collection - optional Filter filter = 2; // Filter conditions - return only those points that satisfy the specified conditions. - optional uint64 sample = 3; // How many points to select and search within. Default is 10. - optional uint64 limit = 4; // How many neighbours per sample to find. Default is 3. - optional string using = 5; // Define which vector to use for querying. If missing, the default vector is is used. - optional uint64 timeout = 6; // If set, overrides global timeout setting for this request. Unit is seconds. - optional ReadConsistency read_consistency = 7; // Options for specifying read consistency guarantees - optional ShardKeySelector shard_key_selector = 8; // Specify in which shards to look for the points, if not specified - look in all shards + // Name of the collection + string collection_name = 1; + // Filter conditions - return only those points that satisfy the specified conditions. + optional Filter filter = 2; + // How many points to select and search within. Default is 10. + optional uint64 sample = 3; + // How many neighbours per sample to find. Default is 3. + optional uint64 limit = 4; + // Define which vector to use for querying. If missing, the default vector is used. + optional string using = 5; + // If set, overrides global timeout setting for this request. Unit is seconds. 
+ optional uint64 timeout = 6; + // Options for specifying read consistency guarantees + optional ReadConsistency read_consistency = 7; + // Specify in which shards to look for the points, if not specified - look in all shards + optional ShardKeySelector shard_key_selector = 8; } message SearchMatrixPairs { - repeated SearchMatrixPair pairs = 1; // List of pairs of points with scores + // List of pairs of points with scores + repeated SearchMatrixPair pairs = 1; } message SearchMatrixPair { - PointId a = 1; // first id of the pair - PointId b = 2; // second id of the pair - float score = 3; // score of the pair + // first id of the pair + PointId a = 1; + // second id of the pair + PointId b = 2; + // score of the pair + float score = 3; } message SearchMatrixOffsets { - repeated uint64 offsets_row = 1; // Row indices of the matrix - repeated uint64 offsets_col = 2; // Column indices of the matrix - repeated float scores = 3; // Scores associated with matrix coordinates - repeated PointId ids = 4; // Ids of the points in order + // Row indices of the matrix + repeated uint64 offsets_row = 1; + // Column indices of the matrix + repeated uint64 offsets_col = 2; + // Scores associated with matrix coordinates + repeated float scores = 3; + // Ids of the points in order + repeated PointId ids = 4; } - message PointsUpdateOperation { message PointStructList { repeated PointStruct points = 1; - optional ShardKeySelector shard_key_selector = 2; // Option for custom sharding to specify used shard keys - optional Filter update_filter = 3; // If specified, only points that match this filter will be updated, others will be inserted + // Option for custom sharding to specify used shard keys + optional ShardKeySelector shard_key_selector = 2; + // Filter to apply when updating existing points. Only points matching this filter will be updated. + // Points that don't match will keep their current state. New points will be inserted regardless of the filter. 
+  optional Filter update_filter = 3;
+  // Mode of the upsert operation: insert_only, upsert (default), update_only
+  optional UpdateMode update_mode = 4;
 }

 message SetPayload {
-  map<string, Value> payload = 1;
-  optional PointsSelector points_selector = 2; // Affected points
-  optional ShardKeySelector shard_key_selector = 3; // Option for custom sharding to specify used shard keys
-  optional string key = 4; // Option for indicate property of payload
+  map<string, Value> payload = 1;
+  // Affected points
+  optional PointsSelector points_selector = 2;
+  // Option for custom sharding to specify used shard keys
+  optional ShardKeySelector shard_key_selector = 3;
+  // Optional key to indicate the payload property to modify
+  optional string key = 4;
 }

 message OverwritePayload {
-  map<string, Value> payload = 1;
-  optional PointsSelector points_selector = 2; // Affected points
-  optional ShardKeySelector shard_key_selector = 3; // Option for custom sharding to specify used shard keys
-  optional string key = 4; // Option for indicate property of payload
+  map<string, Value> payload = 1;
+  // Affected points
+  optional PointsSelector points_selector = 2;
+  // Option for custom sharding to specify used shard keys
+  optional ShardKeySelector shard_key_selector = 3;
+  // Optional key to indicate the payload property to modify
+  optional string key = 4;
 }

 message DeletePayload {
-  repeated string keys = 1;
-  optional PointsSelector points_selector = 2; // Affected points
-  optional ShardKeySelector shard_key_selector = 3; // Option for custom sharding to specify used shard keys
+  repeated string keys = 1;
+  // Affected points
+  optional PointsSelector points_selector = 2;
+  // Option for custom sharding to specify used shard keys
+  optional ShardKeySelector shard_key_selector = 3;
 }

 message UpdateVectors {
-  repeated PointVectors points = 1; // List of points and vectors to update
-  optional ShardKeySelector shard_key_selector = 2; // Option for custom sharding to specify used shard keys
-  optional Filter update_filter = 3; // If specified, only points that match this filter will be updated
+  // List of points and vectors to update
+  repeated PointVectors points = 1;
+  // Option for custom sharding to specify used shard keys
+  optional ShardKeySelector shard_key_selector = 2;
+  // If specified, only points that match this filter will be updated
+  optional Filter update_filter = 3;
 }

 message DeleteVectors {
-  PointsSelector points_selector = 1; // Affected points
-  VectorsSelector vectors = 2; // List of vector names to delete
-  optional ShardKeySelector shard_key_selector = 3; // Option for custom sharding to specify used shard keys
+  // Affected points
+  PointsSelector points_selector = 1;
+  // List of vector names to delete
+  VectorsSelector vectors = 2;
+  // Option for custom sharding to specify used shard keys
+  optional ShardKeySelector shard_key_selector = 3;
 }

 message DeletePoints {
-  PointsSelector points = 1; // Affected points
-  optional ShardKeySelector shard_key_selector = 2; // Option for custom sharding to specify used shard keys
+  // Affected points
+  PointsSelector points = 1;
+  // Option for custom sharding to specify used shard keys
+  optional ShardKeySelector shard_key_selector = 2;
 }

 message ClearPayload {
-  PointsSelector points = 1; // Affected points
-  optional ShardKeySelector shard_key_selector = 2; // Option for custom sharding to specify used shard keys
+  // Affected points
+  PointsSelector points = 1;
+  // Option for custom sharding to specify used shard keys
+  optional ShardKeySelector shard_key_selector = 2;
 }

 oneof operation {
   PointStructList upsert = 1;
-  PointsSelector delete_deprecated = 2 [deprecated=true];
+  PointsSelector delete_deprecated = 2 [deprecated = true];
   SetPayload set_payload = 3;
   OverwritePayload overwrite_payload = 4;
   DeletePayload delete_payload = 5;
-  PointsSelector clear_payload_deprecated = 6 [deprecated=true];
+  PointsSelector clear_payload_deprecated = 6 [deprecated = true];
   UpdateVectors update_vectors = 7;
   DeleteVectors delete_vectors = 8;
   DeletePoints delete_points = 9;
@@ -872,10 +1287,15 @@ message PointsUpdateOperation {
 }

 message UpdateBatchPoints {
-  string collection_name = 1; // name of the collection
-  optional bool wait = 2; // Wait until the changes have been applied?
+  // name of the collection
+  string collection_name = 1;
+  // Wait until the changes have been applied?
+  optional bool wait = 2;
   repeated PointsUpdateOperation operations = 3;
-  optional WriteOrdering ordering = 4; // Write ordering guarantees
+  // Write ordering guarantees
+  optional WriteOrdering ordering = 4;
+  // Timeout for the operation in seconds
+  optional uint64 timeout = 5;
 }

 // ---------------------------------------------
@@ -884,20 +1304,28 @@ message UpdateBatchPoints {
 message PointsOperationResponse {
   UpdateResult result = 1;
-  double time = 2; // Time spent to process
+  // Time spent to process
+  double time = 2;
   optional Usage usage = 3;
 }

 message UpdateResult {
-  optional uint64 operation_id = 1; // Number of operation
-  UpdateStatus status = 2; // Operation status
+  // Number of operation
+  optional uint64 operation_id = 1;
+  // Operation status
+  UpdateStatus status = 2;
 }

 enum UpdateStatus {
   UnknownUpdateStatus = 0;
-  Acknowledged = 1; // Update is received, but not processed yet
-  Completed = 2; // Update is applied and ready for search
-  ClockRejected = 3; // Internal: update is rejected due to an outdated clock
+  // Update is received, but not processed yet
+  Acknowledged = 1;
+  // Update is applied and ready for search
+  Completed = 2;
+  // Internal: update is rejected due to an outdated clock
+  ClockRejected = 3;
+  // Timeout of awaited operations
+  WaitTimeout = 4;
 }

 message OrderValue {
@@ -908,19 +1336,27 @@
 }

 message ScoredPoint {
-  PointId id = 1; // Point id
-  map<string, Value> payload = 2; // Payload
-  float score = 3; // Similarity score
-  reserved 4; // deprecated "vector" field
-  uint64 version = 5; // Last update operation applied to this point
-  optional VectorsOutput vectors = 6; // Vectors to search
-  optional ShardKey shard_key = 7; // Shard key
-  optional OrderValue order_value = 8; // Order by value
+  // Point id
+  PointId id = 1;
+  // Payload
+  map<string, Value> payload = 2;
+  // Similarity score
+  float score = 3;
+  // deprecated "vector" field
+  reserved 4;
+  // Last update operation applied to this point
+  uint64 version = 5;
+  // Vectors to search
+  optional VectorsOutput vectors = 6;
+  // Shard key
+  optional ShardKey shard_key = 7;
+  // Order by value
+  optional OrderValue order_value = 8;
 }

 message GroupId {
   oneof kind {
-    // Represents a double value.
+    // Represents an unsigned integer value.
     uint64 unsigned_value = 1;
     // Represents an integer value
     int64 integer_value = 2;
@@ -930,36 +1366,44 @@ message GroupId {
 }

 message PointGroup {
-  GroupId id = 1; // Group id
-  repeated ScoredPoint hits = 2; // Points in the group
-  RetrievedPoint lookup = 3; // Point(s) from the lookup collection that matches the group id
+  // Group id
+  GroupId id = 1;
+  // Points in the group
+  repeated ScoredPoint hits = 2;
+  // Point(s) from the lookup collection that matches the group id
+  RetrievedPoint lookup = 3;
 }

 message GroupsResult {
-  repeated PointGroup groups = 1; // Groups
+  // Groups
+  repeated PointGroup groups = 1;
 }

 message SearchResponse {
   repeated ScoredPoint result = 1;
-  double time = 2; // Time spent to process
+  // Time spent to process
+  double time = 2;
   optional Usage usage = 3;
 }

 message QueryResponse {
   repeated ScoredPoint result = 1;
-  double time = 2; // Time spent to process
+  // Time spent to process
+  double time = 2;
   optional Usage usage = 3;
 }

 message QueryBatchResponse {
   repeated BatchResult result = 1;
-  double time = 2; // Time spent to process
+  // Time spent to process
+  double time = 2;
   optional Usage usage = 3;
 }

 message QueryGroupsResponse {
   GroupsResult result = 1;
-  double time = 2; // Time spent to process
+  // Time spent to process
+  double time = 2;
   optional Usage usage = 3;
 }

@@ -969,26 +1413,31 @@ message BatchResult {
 message SearchBatchResponse {
   repeated BatchResult result = 1;
-  double time = 2; // Time spent to process
+  // Time spent to process
+  double time = 2;
   optional Usage usage = 3;
 }

 message SearchGroupsResponse {
   GroupsResult result = 1;
-  double time = 2; // Time spent to process
+  // Time spent to process
+  double time = 2;
   optional Usage usage = 3;
 }

 message CountResponse {
   CountResult result = 1;
-  double time = 2; // Time spent to process
+  // Time spent to process
+  double time = 2;
   optional Usage usage = 3;
 }

 message ScrollResponse {
-  optional PointId next_page_offset = 1; // Use this offset for the next query
+  // Use this offset for the next query
+  optional PointId next_page_offset = 1;
   repeated RetrievedPoint result = 2;
-  double time = 3; // Time spent to process
+  // Time spent to process
+  double time = 3;
   optional Usage usage = 4;
 }

@@ -999,69 +1448,82 @@ message CountResult {
 message RetrievedPoint {
   PointId id = 1;
   map<string, Value> payload = 2;
-  reserved 3; // deprecated "vector" field
+  // deprecated "vector" field
+  reserved 3;
   optional VectorsOutput vectors = 4;
-  optional ShardKey shard_key = 5; // Shard key
-  optional OrderValue order_value = 6; // Order-by value
+  // Shard key
+  optional ShardKey shard_key = 5;
+  // Order-by value
+  optional OrderValue order_value = 6;
 }

 message GetResponse {
   repeated RetrievedPoint result = 1;
-  double time = 2; // Time spent to process
+  // Time spent to process
+  double time = 2;
   optional Usage usage = 3;
 }

 message RecommendResponse {
   repeated ScoredPoint result = 1;
-  double time = 2; // Time spent to process
+  // Time spent to process
+  double time = 2;
   optional Usage usage = 3;
 }

 message RecommendBatchResponse {
   repeated BatchResult result = 1;
-  double time = 2; // Time spent to process
+  // Time spent to process
+  double time = 2;
   optional Usage usage = 3;
 }

 message DiscoverResponse {
   repeated ScoredPoint result = 1;
-  double time = 2; // Time spent to process
+  // Time spent to process
+  double time = 2;
   optional Usage usage = 3;
 }

 message DiscoverBatchResponse {
   repeated BatchResult result = 1;
-  double time = 2; // Time spent to process
+  // Time spent to process
+  double time = 2;
   optional Usage usage = 3;
 }

 message RecommendGroupsResponse {
   GroupsResult result = 1;
-  double time = 2; // Time spent to process
+  // Time spent to process
+  double time = 2;
   optional Usage usage = 3;
 }

 message UpdateBatchResponse {
   repeated UpdateResult result = 1;
-  double time = 2; // Time spent to process
+  // Time spent to process
+  double time = 2;
   optional Usage usage = 3;
 }

 message FacetResponse {
-  repeated FacetHit hits = 1;
-  double time = 2; // Time spent to process
-  optional Usage usage = 3;
+  repeated FacetHit hits = 1;
+  // Time spent to process
+  double time = 2;
+  optional Usage usage = 3;
 }

 message SearchMatrixPairsResponse {
   SearchMatrixPairs result = 1;
-  double time = 2; // Time spent to process
+  // Time spent to process
+  double time = 2;
   optional Usage usage = 3;
 }

 message SearchMatrixOffsetsResponse {
   SearchMatrixOffsets result = 1;
-  double time = 2; // Time spent to process
+  // Time spent to process
+  double time = 2;
   optional Usage usage = 3;
 }

@@ -1084,10 +1546,10 @@ message PointsIdsList {
 // ------------------- Point -------------------
 // ---------------------------------------------

-
 message PointStruct {
   PointId id = 1;
-  reserved 2; // deprecated "vector" field
+  // deprecated "vector" field
+  reserved 2;
   map<string, Value> payload = 3;
   optional Vectors vectors = 4;
 }

@@ -1100,7 +1562,6 @@ message Usage {
   optional InferenceUsage inference = 2;
 }

-
 // ---------------------------------------------
 // ------------ Inference measurements ----------
 // ---------------------------------------------
diff --git a/proto/points_service.proto b/proto/points_service.proto
index ef830078..860d4c7b 100644
--- a/proto/points_service.proto
+++ b/proto/points_service.proto
@@ -6,131 +6,98 @@ package qdrant;
 option csharp_namespace = "Qdrant.Client.Grpc";

 service Points {
-  /*
-  Perform insert + updates on points. If a point with a given ID already exists - it will be overwritten.
- */ - rpc Upsert (UpsertPoints) returns (PointsOperationResponse) {} - /* - Delete points - */ - rpc Delete (DeletePoints) returns (PointsOperationResponse) {} - /* - Retrieve points - */ - rpc Get (GetPoints) returns (GetResponse) {} - /* - Update named vectors for point - */ - rpc UpdateVectors (UpdatePointVectors) returns (PointsOperationResponse) {} - /* - Delete named vectors for points - */ - rpc DeleteVectors (DeletePointVectors) returns (PointsOperationResponse) {} - /* - Set payload for points - */ - rpc SetPayload (SetPayloadPoints) returns (PointsOperationResponse) {} - /* - Overwrite payload for points - */ - rpc OverwritePayload (SetPayloadPoints) returns (PointsOperationResponse) {} - /* - Delete specified key payload for points - */ - rpc DeletePayload (DeletePayloadPoints) returns (PointsOperationResponse) {} - /* - Remove all payload for specified points - */ - rpc ClearPayload (ClearPayloadPoints) returns (PointsOperationResponse) {} - /* - Create index for field in collection - */ - rpc CreateFieldIndex (CreateFieldIndexCollection) returns (PointsOperationResponse) {} - /* - Delete field index for collection - */ - rpc DeleteFieldIndex (DeleteFieldIndexCollection) returns (PointsOperationResponse) {} - /* - Retrieve closest points based on vector similarity and given filtering conditions - */ - rpc Search (SearchPoints) returns (SearchResponse) {} - /* - Retrieve closest points based on vector similarity and given filtering conditions - */ - rpc SearchBatch (SearchBatchPoints) returns (SearchBatchResponse) {} - /* - Retrieve closest points based on vector similarity and given filtering conditions, grouped by a given field - */ - rpc SearchGroups (SearchPointGroups) returns (SearchGroupsResponse) {} - /* - Iterate over all or filtered points - */ - rpc Scroll (ScrollPoints) returns (ScrollResponse) {} - /* - Look for the points which are closer to stored positive examples and at the same time further to negative examples. - */ - rpc Recommend (RecommendPoints) returns (RecommendResponse) {} - /* - Look for the points which are closer to stored positive examples and at the same time further to negative examples. - */ - rpc RecommendBatch (RecommendBatchPoints) returns (RecommendBatchResponse) {} - /* - Look for the points which are closer to stored positive examples and at the same time further to negative examples, grouped by a given field - */ - rpc RecommendGroups (RecommendPointGroups) returns (RecommendGroupsResponse) {} - /* - Use context and a target to find the most similar points to the target, constrained by the context. + // Perform insert + updates on points. + // If a point with a given ID already exists - it will be overwritten. 
+ rpc Upsert(UpsertPoints) returns (PointsOperationResponse) {} + // Delete points + rpc Delete(DeletePoints) returns (PointsOperationResponse) {} + // Retrieve points + rpc Get(GetPoints) returns (GetResponse) {} + // Update named vectors for point + rpc UpdateVectors(UpdatePointVectors) returns (PointsOperationResponse) {} + // Delete named vectors for points + rpc DeleteVectors(DeletePointVectors) returns (PointsOperationResponse) {} + // Set payload for points + rpc SetPayload(SetPayloadPoints) returns (PointsOperationResponse) {} + // Overwrite payload for points + rpc OverwritePayload(SetPayloadPoints) returns (PointsOperationResponse) {} + // Delete specified key payload for points + rpc DeletePayload(DeletePayloadPoints) returns (PointsOperationResponse) {} + // Remove all payload for specified points + rpc ClearPayload(ClearPayloadPoints) returns (PointsOperationResponse) {} + // Create index for field in collection + rpc CreateFieldIndex(CreateFieldIndexCollection) + returns (PointsOperationResponse) {} + // Delete field index for collection + rpc DeleteFieldIndex(DeleteFieldIndexCollection) + returns (PointsOperationResponse) {} + // Retrieve closest points based on vector similarity and given filtering + // conditions + rpc Search(SearchPoints) returns (SearchResponse) {} + // Retrieve closest points based on vector similarity and given filtering + // conditions + rpc SearchBatch(SearchBatchPoints) returns (SearchBatchResponse) {} + // Retrieve closest points based on vector similarity and given filtering + // conditions, grouped by a given field + rpc SearchGroups(SearchPointGroups) returns (SearchGroupsResponse) {} + // Iterate over all or filtered points + rpc Scroll(ScrollPoints) returns (ScrollResponse) {} + // Look for the points which are closer to stored positive examples and at + // the same time further to negative examples. + rpc Recommend(RecommendPoints) returns (RecommendResponse) {} + // Look for the points which are closer to stored positive examples and at + // the same time further to negative examples. + rpc RecommendBatch(RecommendBatchPoints) returns (RecommendBatchResponse) {} + // Look for the points which are closer to stored positive examples and at + // the same time further to negative examples, grouped by a given field + rpc RecommendGroups(RecommendPointGroups) returns (RecommendGroupsResponse) {} + // Use context and a target to find the most similar points to the target, + // constrained by the context. + // + // When using only the context (without a target), a special search - called + // context search - is performed where pairs of points are used to generate a + // loss that guides the search towards the zone where most positive examples + // overlap. This means that the score minimizes the scenario of finding a + // point closer to a negative than to a positive part of a pair. + // + // Since the score of a context relates to loss, the maximum score a point + // can get is 0.0, and it becomes normal that many points can have a score of + // 0.0. + // + // When using target (with or without context), the score behaves a little + // different: The integer part of the score represents the rank with respect + // to the context, while the decimal part of the score relates to the + // distance to the target. The context part of the score for each pair is + // calculated +1 if the point is closer to a positive than to a negative part + // of a pair, and -1 otherwise. 
+ rpc Discover(DiscoverPoints) returns (DiscoverResponse) {} + // Batch request points based on { positive, negative } pairs of examples, and/or a target + rpc DiscoverBatch(DiscoverBatchPoints) returns (DiscoverBatchResponse) {} + // Count points in collection with given filtering conditions + rpc Count(CountPoints) returns (CountResponse) {} - When using only the context (without a target), a special search - called context search - is performed where - pairs of points are used to generate a loss that guides the search towards the zone where - most positive examples overlap. This means that the score minimizes the scenario of - finding a point closer to a negative than to a positive part of a pair. - - Since the score of a context relates to loss, the maximum score a point can get is 0.0, - and it becomes normal that many points can have a score of 0.0. - - When using target (with or without context), the score behaves a little different: The - integer part of the score represents the rank with respect to the context, while the - decimal part of the score relates to the distance to the target. The context part of the score for - each pair is calculated +1 if the point is closer to a positive than to a negative part of a pair, - and -1 otherwise. - */ - rpc Discover (DiscoverPoints) returns (DiscoverResponse) {} - /* - Batch request points based on { positive, negative } pairs of examples, and/or a target - */ - rpc DiscoverBatch (DiscoverBatchPoints) returns (DiscoverBatchResponse) {} - /* - Count points in collection with given filtering conditions - */ - rpc Count (CountPoints) returns (CountResponse) {} - - /* - Perform multiple update operations in one request - */ - rpc UpdateBatch (UpdateBatchPoints) returns (UpdateBatchResponse) {} - /* - Universally query points. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries. - */ - rpc Query (QueryPoints) returns (QueryResponse) {} - /* - Universally query points in a batch fashion. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries. - */ - rpc QueryBatch (QueryBatchPoints) returns (QueryBatchResponse) {} - /* - Universally query points in a group fashion. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries. - */ - rpc QueryGroups (QueryPointGroups) returns (QueryGroupsResponse) {} - /* - Perform facet counts. For each value in the field, count the number of points that have this value and match the conditions. - */ - rpc Facet (FacetCounts) returns (FacetResponse) {} - /* - Compute distance matrix for sampled points with a pair based output format - */ - rpc SearchMatrixPairs (SearchMatrixPoints) returns (SearchMatrixPairsResponse) {} - /* - Compute distance matrix for sampled points with an offset based output format - */ - rpc SearchMatrixOffsets (SearchMatrixPoints) returns (SearchMatrixOffsetsResponse) {} + // Perform multiple update operations in one request + rpc UpdateBatch(UpdateBatchPoints) returns (UpdateBatchResponse) {} + // Universally query points. + // This endpoint covers all capabilities of search, recommend, discover, filters. + // But also enables hybrid and multi-stage queries. + rpc Query(QueryPoints) returns (QueryResponse) {} + // Universally query points in a batch fashion. + // This endpoint covers all capabilities of search, recommend, discover, filters. 
+ // But also enables hybrid and multi-stage queries. + rpc QueryBatch(QueryBatchPoints) returns (QueryBatchResponse) {} + // Universally query points in a group fashion. + // This endpoint covers all capabilities of search, recommend, discover, filters. + // But also enables hybrid and multi-stage queries. + rpc QueryGroups(QueryPointGroups) returns (QueryGroupsResponse) {} + // Perform facet counts. + // For each value in the field, count the number of points that have this + // value and match the conditions. + rpc Facet(FacetCounts) returns (FacetResponse) {} + // Compute distance matrix for sampled points with a pair based output format + rpc SearchMatrixPairs(SearchMatrixPoints) + returns (SearchMatrixPairsResponse) {} + // Compute distance matrix for sampled points with an offset based output format + rpc SearchMatrixOffsets(SearchMatrixPoints) + returns (SearchMatrixOffsetsResponse) {} } diff --git a/proto/qdrant.proto b/proto/qdrant.proto index 723bb659..42545ffb 100644 --- a/proto/qdrant.proto +++ b/proto/qdrant.proto @@ -8,7 +8,7 @@ package qdrant; option csharp_namespace = "Qdrant.Client.Grpc"; service Qdrant { - rpc HealthCheck (HealthCheckRequest) returns (HealthCheckReply) {} + rpc HealthCheck(HealthCheckRequest) returns (HealthCheckReply) {} } message HealthCheckRequest {} diff --git a/proto/qdrant_common.proto b/proto/qdrant_common.proto new file mode 100644 index 00000000..770fec24 --- /dev/null +++ b/proto/qdrant_common.proto @@ -0,0 +1,177 @@ +syntax = "proto3"; +package qdrant; + +option csharp_namespace = "Qdrant.Client.Grpc"; +option java_outer_classname = "Common"; + +import "google/protobuf/timestamp.proto"; + +message PointId { + oneof point_id_options { + // Numerical ID of the point + uint64 num = 1; + // UUID + string uuid = 2; + } +} + +message GeoPoint { + double lon = 1; + double lat = 2; +} + +message Filter { + // At least one of those conditions should match + repeated Condition should = 1; + // All conditions must match + repeated Condition must = 2; + // All conditions must NOT match + repeated Condition must_not = 3; + // At least minimum amount of given conditions should match + optional MinShould min_should = 4; +} + +message MinShould { + repeated Condition conditions = 1; + uint64 min_count = 2; +} + +message Condition { + oneof condition_one_of { + FieldCondition field = 1; + IsEmptyCondition is_empty = 2; + HasIdCondition has_id = 3; + Filter filter = 4; + IsNullCondition is_null = 5; + NestedCondition nested = 6; + HasVectorCondition has_vector = 7; + } +} + +message IsEmptyCondition { + string key = 1; +} + +message IsNullCondition { + string key = 1; +} + +message HasIdCondition { + repeated PointId has_id = 1; +} + +message HasVectorCondition { + string has_vector = 1; +} + +message NestedCondition { + // Path to nested object + string key = 1; + // Filter condition + Filter filter = 2; +} + +message FieldCondition { + string key = 1; + // Check if point has field with a given value + Match match = 2; + // Check if points value lies in a given range + Range range = 3; + // Check if points geolocation lies in a given area + GeoBoundingBox geo_bounding_box = 4; + // Check if geo point is within a given radius + GeoRadius geo_radius = 5; + // Check number of values for a specific field + ValuesCount values_count = 6; + // Check if geo point is within a given polygon + GeoPolygon geo_polygon = 7; + // Check if datetime is within a given range + DatetimeRange datetime_range = 8; + // Check if field is empty + optional bool is_empty = 9; + // Check 
if field is null + optional bool is_null = 10; +} + +message Match { + oneof match_value { + // Match string keyword + string keyword = 1; + // Match integer + int64 integer = 2; + // Match boolean + bool boolean = 3; + // Match text + string text = 4; + // Match multiple keywords + RepeatedStrings keywords = 5; + // Match multiple integers + RepeatedIntegers integers = 6; + // Match any other value except those integers + RepeatedIntegers except_integers = 7; + // Match any other value except those keywords + RepeatedStrings except_keywords = 8; + // Match phrase text + string phrase = 9; + // Match any word in the text + string text_any = 10; + } +} + +message RepeatedStrings { + repeated string strings = 1; +} + +message RepeatedIntegers { + repeated int64 integers = 1; +} + +message Range { + optional double lt = 1; + optional double gt = 2; + optional double gte = 3; + optional double lte = 4; +} + +message DatetimeRange { + optional google.protobuf.Timestamp lt = 1; + optional google.protobuf.Timestamp gt = 2; + optional google.protobuf.Timestamp gte = 3; + optional google.protobuf.Timestamp lte = 4; +} + +message GeoBoundingBox { + // north-west corner + GeoPoint top_left = 1; + // south-east corner + GeoPoint bottom_right = 2; +} + +message GeoRadius { + // Center of the circle + GeoPoint center = 1; + // In meters + float radius = 2; +} + +message GeoLineString { + // Ordered sequence of GeoPoints representing the line + repeated GeoPoint points = 1; +} + +// For a valid GeoPolygon, both the exterior and interior GeoLineStrings must +// consist of a minimum of 4 points. +// Additionally, the first and last points of each GeoLineString must be the same. +message GeoPolygon { + // The exterior line bounds the surface + GeoLineString exterior = 1; + // Interior lines (if present) bound holes within the surface + repeated GeoLineString interiors = 2; +} + +message ValuesCount { + optional uint64 lt = 1; + optional uint64 gt = 2; + optional uint64 gte = 3; + optional uint64 lte = 4; +} diff --git a/proto/snapshots_service.proto b/proto/snapshots_service.proto index 63c9e519..df79fb9e 100644 --- a/proto/snapshots_service.proto +++ b/proto/snapshots_service.proto @@ -6,30 +6,18 @@ option csharp_namespace = "Qdrant.Client.Grpc"; import "google/protobuf/timestamp.proto"; service Snapshots { - /* - Create collection snapshot - */ - rpc Create (CreateSnapshotRequest) returns (CreateSnapshotResponse) {} - /* - List collection snapshots - */ - rpc List (ListSnapshotsRequest) returns (ListSnapshotsResponse) {} - /* - Delete collection snapshot - */ - rpc Delete (DeleteSnapshotRequest) returns (DeleteSnapshotResponse) {} - /* - Create full storage snapshot - */ - rpc CreateFull (CreateFullSnapshotRequest) returns (CreateSnapshotResponse) {} - /* - List full storage snapshots - */ - rpc ListFull (ListFullSnapshotsRequest) returns (ListSnapshotsResponse) {} - /* - Delete full storage snapshot - */ - rpc DeleteFull (DeleteFullSnapshotRequest) returns (DeleteSnapshotResponse) {} + // Create collection snapshot + rpc Create(CreateSnapshotRequest) returns (CreateSnapshotResponse) {} + // List collection snapshots + rpc List(ListSnapshotsRequest) returns (ListSnapshotsResponse) {} + // Delete collection snapshot + rpc Delete(DeleteSnapshotRequest) returns (DeleteSnapshotResponse) {} + // Create full storage snapshot + rpc CreateFull(CreateFullSnapshotRequest) returns (CreateSnapshotResponse) {} + // List full storage snapshots + rpc ListFull(ListFullSnapshotsRequest) returns (ListSnapshotsResponse) {} 
+  // Delete full storage snapshot
+  rpc DeleteFull(DeleteFullSnapshotRequest) returns (DeleteSnapshotResponse) {}
 }

 message CreateFullSnapshotRequest {}
@@ -37,39 +25,51 @@ message CreateFullSnapshotRequest {}
 message ListFullSnapshotsRequest {}

 message DeleteFullSnapshotRequest {
-  string snapshot_name = 1; // Name of the full snapshot
+  // Name of the full snapshot
+  string snapshot_name = 1;
 }

 message CreateSnapshotRequest {
-  string collection_name = 1; // Name of the collection
+  // Name of the collection
+  string collection_name = 1;
 }

 message ListSnapshotsRequest {
-  string collection_name = 1; // Name of the collection
+  // Name of the collection
+  string collection_name = 1;
 }

 message DeleteSnapshotRequest {
-  string collection_name = 1; // Name of the collection
-  string snapshot_name = 2; // Name of the collection snapshot
+  // Name of the collection
+  string collection_name = 1;
+  // Name of the collection snapshot
+  string snapshot_name = 2;
 }

 message SnapshotDescription {
-  string name = 1; // Name of the snapshot
-  google.protobuf.Timestamp creation_time = 2; // Creation time of the snapshot
-  int64 size = 3; // Size of the snapshot in bytes
-  optional string checksum = 4; // SHA256 digest of the snapshot file
+  // Name of the snapshot
+  string name = 1;
+  // Creation time of the snapshot
+  google.protobuf.Timestamp creation_time = 2;
+  // Size of the snapshot in bytes
+  int64 size = 3;
+  // SHA256 digest of the snapshot file
+  optional string checksum = 4;
 }

 message CreateSnapshotResponse {
   SnapshotDescription snapshot_description = 1;
-  double time = 2; // Time spent to process
+  // Time spent to process
+  double time = 2;
 }

 message ListSnapshotsResponse {
   repeated SnapshotDescription snapshot_descriptions = 1;
-  double time = 2; // Time spent to process
+  // Time spent to process
+  double time = 2;
 }

 message DeleteSnapshotResponse {
-  double time = 1; // Time spent to process
+  // Time spent to process
+  double time = 1;
 }
diff --git a/src/builders/bool_index_params_builder.rs b/src/builders/bool_index_params_builder.rs
index 387c0085..7fbc61a0 100644
--- a/src/builders/bool_index_params_builder.rs
+++ b/src/builders/bool_index_params_builder.rs
@@ -4,6 +4,8 @@ use crate::qdrant::*;
 pub struct BoolIndexParamsBuilder {
     /// If true - store index on disk.
     pub(crate) on_disk: Option<Option<bool>>,
+    /// If true - enable HNSW index for this field.
+    pub(crate) enable_hnsw: Option<Option<bool>>,
 }

 impl Default for BoolIndexParamsBuilder {
@@ -23,16 +25,24 @@ impl BoolIndexParamsBuilder {
         new.on_disk = Option::Some(Option::Some(value));
         new
     }
+    /// If true - enable HNSW index for this field.
+    pub fn enable_hnsw(self, value: bool) -> Self {
+        let mut new = self;
+        new.enable_hnsw = Option::Some(Option::Some(value));
+        new
+    }

     fn build_inner(self) -> Result<BoolIndexParams, BoolIndexParamsBuilderError> {
         Ok(BoolIndexParams {
             on_disk: self.on_disk.unwrap_or_default(),
+            enable_hnsw: self.enable_hnsw.unwrap_or_default(),
         })
     }
     /// Create an empty builder, with all fields set to `None` or `PhantomData`.
    fn create_empty() -> Self {
        Self {
            on_disk: core::default::Default::default(),
+           enable_hnsw: core::default::Default::default(),
        }
    }
}
diff --git a/src/builders/clear_payload_points_builder.rs b/src/builders/clear_payload_points_builder.rs
index 60698312..f3c0f8d6 100644
--- a/src/builders/clear_payload_points_builder.rs
+++ b/src/builders/clear_payload_points_builder.rs
@@ -13,6 +13,8 @@ pub struct ClearPayloadPointsBuilder {
     pub(crate) ordering: Option<Option<WriteOrdering>>,
     /// Option for custom sharding to specify used shard keys
     pub(crate) shard_key_selector: Option<Option<ShardKeySelector>>,
+    /// Timeout for the request in seconds
+    pub(crate) timeout: Option<Option<u64>>,
 }

 impl ClearPayloadPointsBuilder {
@@ -52,6 +54,12 @@ impl ClearPayloadPointsBuilder {
         new.shard_key_selector = Option::Some(Option::Some(value.into()));
         new
     }
+    /// Timeout for the request in seconds
+    pub fn timeout(self, value: u64) -> Self {
+        let mut new = self;
+        new.timeout = Option::Some(Option::Some(value));
+        new
+    }

     fn build_inner(self) -> Result<ClearPayloadPoints, ClearPayloadPointsBuilderError> {
         Ok(ClearPayloadPoints {
@@ -67,6 +75,7 @@ impl ClearPayloadPointsBuilder {
             points: { convert_option(&self.points) },
             ordering: self.ordering.unwrap_or_default(),
             shard_key_selector: self.shard_key_selector.unwrap_or_default(),
+            timeout: self.timeout.unwrap_or_default(),
         })
     }
     /// Create an empty builder, with all fields set to `None` or `PhantomData`.
@@ -77,6 +86,7 @@ impl ClearPayloadPointsBuilder {
             points: core::default::Default::default(),
             ordering: core::default::Default::default(),
             shard_key_selector: core::default::Default::default(),
+            timeout: core::default::Default::default(),
         }
     }
 }
diff --git a/src/builders/collection_params_diff_builder.rs b/src/builders/collection_params_diff_builder.rs
index 8647c0c1..298695f5 100644
--- a/src/builders/collection_params_diff_builder.rs
+++ b/src/builders/collection_params_diff_builder.rs
@@ -10,6 +10,8 @@ pub struct CollectionParamsDiffBuilder {
     pub(crate) on_disk_payload: Option<Option<bool>>,
     /// Fan-out every read request to these many additional remote nodes (and return first available response)
     pub(crate) read_fan_out_factor: Option<Option<u32>>,
+    /// Fan-out delay in milliseconds. If set, the fan-out request will be delayed by this amount.
+    pub(crate) read_fan_out_delay_ms: Option<Option<u64>>,
 }
 #[allow(clippy::all)]
 #[allow(clippy::derive_partial_eq_without_eq)]
@@ -38,6 +40,12 @@ impl CollectionParamsDiffBuilder {
         new.read_fan_out_factor = Option::Some(Option::Some(value));
         new
     }
+    /// Fan-out delay in milliseconds. If set, the fan-out request will be delayed by this amount.
+    pub fn read_fan_out_delay_ms(self, value: u64) -> Self {
+        let mut new = self;
+        new.read_fan_out_delay_ms = Option::Some(Option::Some(value));
+        new
+    }

     fn build_inner(self) -> Result<CollectionParamsDiff, CollectionParamsDiffBuilderError> {
         Ok(CollectionParamsDiff {
@@ -57,6 +65,10 @@ impl CollectionParamsDiffBuilder {
                 Some(value) => value,
                 None => core::default::Default::default(),
             },
+            read_fan_out_delay_ms: match self.read_fan_out_delay_ms {
+                Some(value) => value,
+                None => core::default::Default::default(),
+            },
         })
     }
     /// Create an empty builder, with all fields set to `None` or `PhantomData`.
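For context, a minimal usage sketch of the new fan-out delay knob. The struct-literal style mirrors the prost-generated types used elsewhere in this client; the concrete values are illustrative only and are not part of this diff:

```rust
use qdrant_client::qdrant::CollectionParamsDiff;

// Fan reads out to 2 extra nodes, but delay the fan-out by 100 ms.
// (Illustrative values; `read_fan_out_delay_ms` is the field added above.)
let diff = CollectionParamsDiff {
    read_fan_out_factor: Some(2),
    read_fan_out_delay_ms: Some(100),
    ..Default::default()
};
```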
@@ -66,6 +78,7 @@ impl CollectionParamsDiffBuilder {
             write_consistency_factor: core::default::Default::default(),
             on_disk_payload: core::default::Default::default(),
             read_fan_out_factor: core::default::Default::default(),
+            read_fan_out_delay_ms: core::default::Default::default(),
         }
     }
 }
diff --git a/src/builders/create_field_index_collection_builder.rs b/src/builders/create_field_index_collection_builder.rs
index f106dcbd..404d6d77 100644
--- a/src/builders/create_field_index_collection_builder.rs
+++ b/src/builders/create_field_index_collection_builder.rs
@@ -15,6 +15,8 @@ pub struct CreateFieldIndexCollectionBuilder {
     field_index_params: Option<PayloadIndexParams>,
     /// Write ordering guarantees
     pub(crate) ordering: Option<Option<WriteOrdering>>,
+    /// Timeout for the request in seconds
+    pub(crate) timeout: Option<Option<u64>>,
 }

 impl CreateFieldIndexCollectionBuilder {
@@ -57,6 +59,12 @@ impl CreateFieldIndexCollectionBuilder {
         new.ordering = Option::Some(Option::Some(value.into()));
         new
     }
+    /// Timeout for the request in seconds
+    pub fn timeout(self, value: u64) -> Self {
+        let mut new = self;
+        new.timeout = Option::Some(Option::Some(value));
+        new
+    }

     fn build_inner(
         self,
     ) -> Result<CreateFieldIndexCollection, CreateFieldIndexCollectionBuilderError> {
@@ -82,6 +90,7 @@ impl CreateFieldIndexCollectionBuilder {
             field_type: self.field_type.unwrap_or_default(),
             field_index_params: { convert_option(&self.field_index_params) },
             ordering: self.ordering.unwrap_or_default(),
+            timeout: self.timeout.unwrap_or_default(),
         })
     }
     /// Create an empty builder, with all fields set to `None` or `PhantomData`.
@@ -93,6 +102,7 @@ impl CreateFieldIndexCollectionBuilder {
             field_type: core::default::Default::default(),
             field_index_params: core::default::Default::default(),
             ordering: core::default::Default::default(),
+            timeout: core::default::Default::default(),
         }
     }
 }
diff --git a/src/builders/datetime_index_params_builder.rs b/src/builders/datetime_index_params_builder.rs
index f9ed999c..fabd2a14 100644
--- a/src/builders/datetime_index_params_builder.rs
+++ b/src/builders/datetime_index_params_builder.rs
@@ -6,6 +6,8 @@ pub struct DatetimeIndexParamsBuilder {
     pub(crate) on_disk: Option<Option<bool>>,
     /// If true - use this key to organize storage of the collection data. This option assumes that this key will be used in majority of filtered requests.
     pub(crate) is_principal: Option<Option<bool>>,
+    /// If true - enable HNSW index for this field.
+    pub(crate) enable_hnsw: Option<Option<bool>>,
 }

 impl DatetimeIndexParamsBuilder {
@@ -21,11 +23,18 @@ impl DatetimeIndexParamsBuilder {
         new.is_principal = Option::Some(Option::Some(value));
         new
     }
+    /// If true - enable HNSW index for this field.
+    pub fn enable_hnsw(self, value: bool) -> Self {
+        let mut new = self;
+        new.enable_hnsw = Option::Some(Option::Some(value));
+        new
+    }

     fn build_inner(self) -> Result<DatetimeIndexParams, DatetimeIndexParamsBuilderError> {
         Ok(DatetimeIndexParams {
             on_disk: self.on_disk.unwrap_or_default(),
             is_principal: self.is_principal.unwrap_or_default(),
+            enable_hnsw: self.enable_hnsw.unwrap_or_default(),
         })
     }
     /// Create an empty builder, with all fields set to `None` or `PhantomData`.
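A short sketch of the new per-field HNSW toggle on the datetime index params. `default()` is assumed to exist on this builder as it does on the other index-params builders in this diff:

```rust
use qdrant_client::qdrant::DatetimeIndexParamsBuilder;

// Principal on-disk datetime index with the new HNSW toggle enabled.
let params = DatetimeIndexParamsBuilder::default()
    .is_principal(true)
    .enable_hnsw(true);
```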
@@ -33,6 +42,7 @@ impl DatetimeIndexParamsBuilder {
         Self {
             on_disk: core::default::Default::default(),
             is_principal: core::default::Default::default(),
+            enable_hnsw: core::default::Default::default(),
         }
     }
 }
diff --git a/src/builders/delete_field_index_collection_builder.rs b/src/builders/delete_field_index_collection_builder.rs
index 4d396ece..cccc14e1 100644
--- a/src/builders/delete_field_index_collection_builder.rs
+++ b/src/builders/delete_field_index_collection_builder.rs
@@ -10,6 +10,8 @@ pub struct DeleteFieldIndexCollectionBuilder {
     pub(crate) field_name: Option<String>,
     /// Write ordering guarantees
     pub(crate) ordering: Option<Option<WriteOrdering>>,
+    /// Timeout for the request in seconds
+    pub(crate) timeout: Option<Option<u64>>,
 }

 impl DeleteFieldIndexCollectionBuilder {
@@ -37,6 +39,12 @@ impl DeleteFieldIndexCollectionBuilder {
         new.ordering = Option::Some(Option::Some(value));
         new
     }
+    /// Timeout for the request in seconds
+    pub fn timeout(self, value: u64) -> Self {
+        let mut new = self;
+        new.timeout = Option::Some(Option::Some(value));
+        new
+    }

     fn build_inner(
         self,
     ) -> Result<DeleteFieldIndexCollection, DeleteFieldIndexCollectionBuilderError> {
@@ -60,6 +68,7 @@ impl DeleteFieldIndexCollectionBuilder {
                 }
             },
             ordering: self.ordering.unwrap_or_default(),
+            timeout: self.timeout.unwrap_or_default(),
         })
     }
     /// Create an empty builder, with all fields set to `None` or `PhantomData`.
@@ -69,6 +78,7 @@ impl DeleteFieldIndexCollectionBuilder {
             wait: core::default::Default::default(),
             field_name: core::default::Default::default(),
             ordering: core::default::Default::default(),
+            timeout: core::default::Default::default(),
         }
     }
 }
diff --git a/src/builders/delete_payload_points_builder.rs b/src/builders/delete_payload_points_builder.rs
index 1bcc20f8..f6639306 100644
--- a/src/builders/delete_payload_points_builder.rs
+++ b/src/builders/delete_payload_points_builder.rs
@@ -15,6 +15,8 @@ pub struct DeletePayloadPointsBuilder {
     pub(crate) ordering: Option<Option<WriteOrdering>>,
     /// Option for custom sharding to specify used shard keys
     pub(crate) shard_key_selector: Option<Option<ShardKeySelector>>,
+    /// Timeout for the request in seconds
+    pub(crate) timeout: Option<Option<u64>>,
 }

 impl DeletePayloadPointsBuilder {
@@ -60,6 +62,12 @@ impl DeletePayloadPointsBuilder {
         new.shard_key_selector = Option::Some(Option::Some(value.into()));
         new
     }
+    /// Timeout for the request in seconds
+    pub fn timeout(self, value: u64) -> Self {
+        let mut new = self;
+        new.timeout = Option::Some(Option::Some(value));
+        new
+    }

     fn build_inner(self) -> Result<DeletePayloadPoints, DeletePayloadPointsBuilderError> {
         Ok(DeletePayloadPoints {
@@ -83,6 +91,7 @@ impl DeletePayloadPointsBuilder {
             points_selector: { convert_option(&self.points_selector) },
             ordering: self.ordering.unwrap_or_default(),
             shard_key_selector: self.shard_key_selector.unwrap_or_default(),
+            timeout: self.timeout.unwrap_or_default(),
         })
     }
     /// Create an empty builder, with all fields set to `None` or `PhantomData`.
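Usage sketch for the new per-request `timeout` on payload deletion. The `new(collection, keys)` constructor and `wait` setter come from the existing client API and are assumptions here; only `timeout` is added by this diff:

```rust
use qdrant_client::qdrant::DeletePayloadPointsBuilder;

// Drop two payload keys, waiting for completion, but at most 5 seconds.
let request = DeletePayloadPointsBuilder::new(
    "my_collection",
    vec!["color".to_string(), "price".to_string()],
)
.wait(true)
.timeout(5);
```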
@@ -94,6 +103,7 @@ impl DeletePayloadPointsBuilder {
             points_selector: core::default::Default::default(),
             ordering: core::default::Default::default(),
             shard_key_selector: core::default::Default::default(),
+            timeout: core::default::Default::default(),
         }
     }
 }
diff --git a/src/builders/delete_point_vectors_builder.rs b/src/builders/delete_point_vectors_builder.rs
index 777ead11..57f825a8 100644
--- a/src/builders/delete_point_vectors_builder.rs
+++ b/src/builders/delete_point_vectors_builder.rs
@@ -15,6 +15,8 @@ pub struct DeletePointVectorsBuilder {
     pub(crate) ordering: Option<Option<WriteOrdering>>,
     /// Option for custom sharding to specify used shard keys
     pub(crate) shard_key_selector: Option<Option<ShardKeySelector>>,
+    /// Timeout for the request in seconds
+    pub(crate) timeout: Option<Option<u64>>,
 }

 impl DeletePointVectorsBuilder {
@@ -60,6 +62,12 @@ impl DeletePointVectorsBuilder {
         new.shard_key_selector = Option::Some(Option::Some(value.into()));
         new
     }
+    /// Timeout for the request in seconds
+    pub fn timeout(self, value: u64) -> Self {
+        let mut new = self;
+        new.timeout = Option::Some(Option::Some(value));
+        new
+    }

     fn build_inner(self) -> Result<DeletePointVectors, DeletePointVectorsBuilderError> {
         Ok(DeletePointVectors {
@@ -76,6 +84,7 @@ impl DeletePointVectorsBuilder {
             vectors: self.vectors.unwrap_or_default(),
             ordering: self.ordering.unwrap_or_default(),
             shard_key_selector: self.shard_key_selector.unwrap_or_default(),
+            timeout: self.timeout.unwrap_or_default(),
         })
     }
     /// Create an empty builder, with all fields set to `None` or `PhantomData`.
@@ -87,6 +96,7 @@ impl DeletePointVectorsBuilder {
             vectors: core::default::Default::default(),
             ordering: core::default::Default::default(),
             shard_key_selector: core::default::Default::default(),
+            timeout: core::default::Default::default(),
         }
     }
 }
diff --git a/src/builders/delete_points_builder.rs b/src/builders/delete_points_builder.rs
index 995f3d84..c87bcf66 100644
--- a/src/builders/delete_points_builder.rs
+++ b/src/builders/delete_points_builder.rs
@@ -13,6 +13,8 @@ pub struct DeletePointsBuilder {
     pub(crate) ordering: Option<Option<WriteOrdering>>,
     /// Option for custom sharding to specify used shard keys
     pub(crate) shard_key_selector: Option<Option<ShardKeySelector>>,
+    /// Timeout for the request in seconds
+    pub(crate) timeout: Option<Option<u64>>,
 }

 impl DeletePointsBuilder {
@@ -52,6 +54,12 @@ impl DeletePointsBuilder {
         new.shard_key_selector = Option::Some(Option::Some(value.into()));
         new
     }
+    /// Timeout for the request in seconds
+    pub fn timeout(self, value: u64) -> Self {
+        let mut new = self;
+        new.timeout = Option::Some(Option::Some(value));
+        new
+    }

     fn build_inner(self) -> Result<DeletePoints, DeletePointsBuilderError> {
         Ok(DeletePoints {
@@ -67,6 +75,7 @@ impl DeletePointsBuilder {
             points: { convert_option(&self.points) },
             ordering: self.ordering.unwrap_or_default(),
             shard_key_selector: self.shard_key_selector.unwrap_or_default(),
+            timeout: self.timeout.unwrap_or_default(),
         })
     }
     /// Create an empty builder, with all fields set to `None` or `PhantomData`.
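The same `timeout` addition applies to point deletion; a sketch, with the documented `DeletePointsBuilder` entry points assumed from the existing API:

```rust
use qdrant_client::qdrant::{DeletePointsBuilder, PointsIdsList};

// Delete three points by id; give the server at most 10 seconds.
let request = DeletePointsBuilder::new("my_collection")
    .points(PointsIdsList {
        ids: vec![0.into(), 3.into(), 100.into()],
    })
    .wait(true)
    .timeout(10);
```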
@@ -77,6 +86,7 @@ impl DeletePointsBuilder { points: core::default::Default::default(), ordering: core::default::Default::default(), shard_key_selector: core::default::Default::default(), + timeout: core::default::Default::default(), } } } diff --git a/src/builders/feedback_item_builder.rs b/src/builders/feedback_item_builder.rs new file mode 100644 index 00000000..98f8dec5 --- /dev/null +++ b/src/builders/feedback_item_builder.rs @@ -0,0 +1,46 @@ +use crate::qdrant::*; + +#[derive(Clone)] +pub struct FeedbackItemBuilder { + /// The id or vector from the original model + pub(crate) example: VectorInput, + /// Score for this vector as determined by the feedback provider + pub(crate) score: f32, +} + +impl FeedbackItemBuilder { + /// Create a new builder with an example and its score. + /// + /// # Arguments + /// + /// * `example` - The id or vector from the original model. + /// * `score` - Score for this vector as determined by the feedback provider. + /// + /// # Examples + /// + /// ``` + /// use qdrant_client::qdrant::{FeedbackItemBuilder, PointId, VectorInput}; + /// + /// let item = FeedbackItemBuilder::new(VectorInput::new_id(PointId::from(42)), 0.9); + /// ``` + pub fn new(example: impl Into, score: f32) -> Self { + Self { + example: example.into(), + score, + } + } + + /// Builds the desired type. Can often be omitted. + pub fn build(self) -> FeedbackItem { + FeedbackItem { + example: Some(self.example), + score: self.score, + } + } +} + +impl From for FeedbackItem { + fn from(value: FeedbackItemBuilder) -> Self { + value.build() + } +} diff --git a/src/builders/feedback_strategy_builder.rs b/src/builders/feedback_strategy_builder.rs new file mode 100644 index 00000000..e9d2df10 --- /dev/null +++ b/src/builders/feedback_strategy_builder.rs @@ -0,0 +1,44 @@ +use crate::qdrant::*; + +#[derive(Clone)] +pub struct FeedbackStrategyBuilder { + pub(crate) variant: feedback_strategy::Variant, +} + +impl FeedbackStrategyBuilder { + /// Create a naive feedback strategy with specified coefficients. + /// + /// The naive strategy uses the formula: `a * score + sim(confidence^b * c * delta)` + /// + /// # Arguments + /// + /// * `a` - Coefficient for the original score component. + /// * `b` - Exponent for confidence in the feedback component. + /// * `c` - Coefficient for the delta in the feedback component. + /// + /// # Examples + /// + /// ``` + /// use qdrant_client::qdrant::FeedbackStrategyBuilder; + /// + /// let strategy = FeedbackStrategyBuilder::naive(1.0, 1.0, 1.0); + /// ``` + pub fn naive(a: f32, b: f32, c: f32) -> Self { + Self { + variant: feedback_strategy::Variant::Naive(NaiveFeedbackStrategy { a, b, c }), + } + } + + /// Builds the desired type. Can often be omitted. + pub fn build(self) -> FeedbackStrategy { + FeedbackStrategy { + variant: Some(self.variant), + } + } +} + +impl From for FeedbackStrategy { + fn from(value: FeedbackStrategyBuilder) -> Self { + value.build() + } +} diff --git a/src/builders/float_index_params_builder.rs b/src/builders/float_index_params_builder.rs index 84de84a7..4fb57865 100644 --- a/src/builders/float_index_params_builder.rs +++ b/src/builders/float_index_params_builder.rs @@ -6,6 +6,8 @@ pub struct FloatIndexParamsBuilder { pub(crate) on_disk: Option>, /// If true - use this key to organize storage of the collection data. This option assumes that this key will be used in majority of filtered requests. pub(crate) is_principal: Option>, + /// If true - enable HNSW index for this field. 
+ pub(crate) enable_hnsw: Option>, } impl Default for FloatIndexParamsBuilder { @@ -31,11 +33,18 @@ impl FloatIndexParamsBuilder { new.is_principal = Option::Some(Option::Some(value)); new } + /// If true - enable HNSW index for this field. + pub fn enable_hnsw(self, value: bool) -> Self { + let mut new = self; + new.enable_hnsw = Option::Some(Option::Some(value)); + new + } fn build_inner(self) -> Result { Ok(FloatIndexParams { on_disk: self.on_disk.unwrap_or_default(), is_principal: self.is_principal.unwrap_or_default(), + enable_hnsw: self.enable_hnsw.unwrap_or_default(), }) } /// Create an empty builder, with all fields set to `None` or `PhantomData`. @@ -43,6 +52,7 @@ impl FloatIndexParamsBuilder { Self { on_disk: core::default::Default::default(), is_principal: core::default::Default::default(), + enable_hnsw: core::default::Default::default(), } } } diff --git a/src/builders/geo_index_params_builder.rs b/src/builders/geo_index_params_builder.rs index c82f4528..4334c8fa 100644 --- a/src/builders/geo_index_params_builder.rs +++ b/src/builders/geo_index_params_builder.rs @@ -4,6 +4,8 @@ use crate::qdrant::*; pub struct GeoIndexParamsBuilder { /// If true - store index on disk. pub(crate) on_disk: Option>, + /// If true - enable HNSW index for this field. + pub(crate) enable_hnsw: Option>, } impl Default for GeoIndexParamsBuilder { @@ -23,16 +25,24 @@ impl GeoIndexParamsBuilder { new.on_disk = Option::Some(Option::Some(value)); new } + /// If true - enable HNSW index for this field. + pub fn enable_hnsw(self, value: bool) -> Self { + let mut new = self; + new.enable_hnsw = Option::Some(Option::Some(value)); + new + } fn build_inner(self) -> Result { Ok(GeoIndexParams { on_disk: self.on_disk.unwrap_or_default(), + enable_hnsw: self.enable_hnsw.unwrap_or_default(), }) } /// Create an empty builder, with all fields set to `None` or `PhantomData`. fn create_empty() -> Self { Self { on_disk: core::default::Default::default(), + enable_hnsw: core::default::Default::default(), } } } diff --git a/src/builders/integer_index_params_builder.rs b/src/builders/integer_index_params_builder.rs index d8a86f30..058b0e77 100644 --- a/src/builders/integer_index_params_builder.rs +++ b/src/builders/integer_index_params_builder.rs @@ -10,6 +10,8 @@ pub struct IntegerIndexParamsBuilder { pub(crate) is_principal: Option>, /// If true - store index on disk. pub(crate) on_disk: Option>, + /// If true - enable HNSW index for this field. + pub(crate) enable_hnsw: Option>, } impl IntegerIndexParamsBuilder { @@ -41,6 +43,12 @@ impl IntegerIndexParamsBuilder { new.on_disk = Option::Some(Option::Some(value)); new } + /// If true - enable HNSW index for this field. + pub fn enable_hnsw(self, value: bool) -> Self { + let mut new = self; + new.enable_hnsw = Option::Some(Option::Some(value)); + new + } fn build_inner(self) -> Result { Ok(IntegerIndexParams { @@ -48,6 +56,7 @@ impl IntegerIndexParamsBuilder { range: self.range.unwrap_or_default(), is_principal: self.is_principal.unwrap_or_default(), on_disk: self.on_disk.unwrap_or_default(), + enable_hnsw: self.enable_hnsw.unwrap_or_default(), }) } /// Create an empty builder, with all fields set to `None` or `PhantomData`. 
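`enable_hnsw` follows the same pattern on every payload index params builder. A sketch of wiring it into a field index request, assuming the crate's usual `From` conversion from `KeywordIndexParams` into `PayloadIndexParams`; names are placeholders:

use qdrant_client::qdrant::{
    CreateFieldIndexCollectionBuilder, FieldType, KeywordIndexParamsBuilder,
};

// Keyword index kept on disk, with the new per-field HNSW index enabled.
let params = KeywordIndexParamsBuilder::default()
    .is_tenant(true)
    .on_disk(true)
    .enable_hnsw(true) // new in this change
    .build();

let request =
    CreateFieldIndexCollectionBuilder::new("my_collection", "category", FieldType::Keyword)
        .field_index_params(params) // assumes From<KeywordIndexParams> for PayloadIndexParams
        .build();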
@@ -57,6 +66,7 @@ impl IntegerIndexParamsBuilder { range: core::default::Default::default(), is_principal: core::default::Default::default(), on_disk: core::default::Default::default(), + enable_hnsw: core::default::Default::default(), } } } diff --git a/src/builders/keyword_index_params_builder.rs b/src/builders/keyword_index_params_builder.rs index e56f1ae5..fbc038d3 100644 --- a/src/builders/keyword_index_params_builder.rs +++ b/src/builders/keyword_index_params_builder.rs @@ -6,6 +6,8 @@ pub struct KeywordIndexParamsBuilder { pub(crate) is_tenant: Option>, /// If true - store index on disk. pub(crate) on_disk: Option>, + /// If true - enable HNSW index for this field. + pub(crate) enable_hnsw: Option>, } impl Default for KeywordIndexParamsBuilder { @@ -27,11 +29,18 @@ impl KeywordIndexParamsBuilder { new.on_disk = Option::Some(Option::Some(value)); new } + /// If true - enable HNSW index for this field. + pub fn enable_hnsw(self, value: bool) -> Self { + let mut new = self; + new.enable_hnsw = Option::Some(Option::Some(value)); + new + } fn build_inner(self) -> Result { Ok(KeywordIndexParams { is_tenant: self.is_tenant.unwrap_or_default(), on_disk: self.on_disk.unwrap_or_default(), + enable_hnsw: self.enable_hnsw.unwrap_or_default(), }) } /// Create an empty builder, with all fields set to `None` or `PhantomData`. @@ -39,6 +48,7 @@ impl KeywordIndexParamsBuilder { Self { is_tenant: core::default::Default::default(), on_disk: core::default::Default::default(), + enable_hnsw: core::default::Default::default(), } } } diff --git a/src/builders/mod.rs b/src/builders/mod.rs index 1abe68ca..38843c72 100644 --- a/src/builders/mod.rs +++ b/src/builders/mod.rs @@ -258,3 +258,12 @@ pub use mmr_builder::MmrBuilder; mod rrf_builder; pub use rrf_builder::RrfBuilder; + +mod relevance_feedback_input_builder; +pub use relevance_feedback_input_builder::RelevanceFeedbackInputBuilder; + +mod feedback_item_builder; +pub use feedback_item_builder::FeedbackItemBuilder; + +mod feedback_strategy_builder; +pub use feedback_strategy_builder::FeedbackStrategyBuilder; diff --git a/src/builders/optimizers_config_diff_builder.rs b/src/builders/optimizers_config_diff_builder.rs index 8fa83bf0..7b2c9e2f 100644 --- a/src/builders/optimizers_config_diff_builder.rs +++ b/src/builders/optimizers_config_diff_builder.rs @@ -57,6 +57,11 @@ pub struct OptimizersConfigDiffBuilder { /// - If `auto` - have no limit and choose dynamically to saturate CPU. /// - If `disabled` or `0` - no optimization threads, optimizations will be disabled. pub(crate) max_optimization_threads: Option>, + /// + /// If true, all segments will be forced to have the indexes built. + /// Using this option may lead to increased delay between submitting an update and its application. + /// Default is disabled. + pub(crate) prevent_unoptimized: Option>, } impl OptimizersConfigDiffBuilder { @@ -174,6 +179,16 @@ impl OptimizersConfigDiffBuilder { new } + /// + /// If true, all segments will be forced to have the indexes built. + /// Using this option may lead to increased delay between submitting an update and its application. + /// Default is disabled. 
+ pub fn prevent_unoptimized(self, value: bool) -> Self { + let mut new = self; + new.prevent_unoptimized = Option::Some(Option::Some(value)); + new + } + fn build_inner(self) -> Result { Ok(OptimizersConfigDiff { deleted_threshold: self.deleted_threshold.unwrap_or_default(), @@ -186,6 +201,7 @@ impl OptimizersConfigDiffBuilder { max_optimization_threads: self.max_optimization_threads.unwrap_or_default(), // Deprecated: replaced with max_optimization_threads deprecated_max_optimization_threads: None, + prevent_unoptimized: self.prevent_unoptimized.unwrap_or_default(), }) } /// Create an empty builder, with all fields set to `None` or `PhantomData`. @@ -199,6 +215,7 @@ impl OptimizersConfigDiffBuilder { indexing_threshold: core::default::Default::default(), flush_interval_sec: core::default::Default::default(), max_optimization_threads: core::default::Default::default(), + prevent_unoptimized: core::default::Default::default(), } } } diff --git a/src/builders/relevance_feedback_input_builder.rs b/src/builders/relevance_feedback_input_builder.rs new file mode 100644 index 00000000..9a8ef446 --- /dev/null +++ b/src/builders/relevance_feedback_input_builder.rs @@ -0,0 +1,61 @@ +use crate::qdrant::*; + +#[derive(Clone)] +pub struct RelevanceFeedbackInputBuilder { + /// The original query vector + pub(crate) target: VectorInput, + /// Previous results scored by the feedback provider + pub(crate) feedback: Vec, + /// Formula and trained coefficients to use + pub(crate) strategy: Option, +} + +impl RelevanceFeedbackInputBuilder { + /// Create a new builder with a target vector. + /// + /// # Arguments + /// + /// * `target` - The original query vector to search around. + /// + /// # Examples + /// + /// ``` + /// use qdrant_client::qdrant::{RelevanceFeedbackInputBuilder, VectorInput}; + /// + /// let builder = RelevanceFeedbackInputBuilder::new(VectorInput::new_dense(vec![0.1, 0.2, 0.3])); + /// ``` + pub fn new(target: impl Into) -> Self { + Self { + target: target.into(), + feedback: Vec::new(), + strategy: None, + } + } + + /// Add a single feedback item. + pub fn add_feedback(mut self, item: impl Into) -> Self { + self.feedback.push(item.into()); + self + } + + /// Set the feedback strategy. + pub fn strategy(mut self, value: impl Into) -> Self { + self.strategy = Some(value.into()); + self + } + + /// Builds the desired type. Can often be omitted. + pub fn build(self) -> RelevanceFeedbackInput { + RelevanceFeedbackInput { + target: Some(self.target), + feedback: self.feedback, + strategy: self.strategy, + } + } +} + +impl From for RelevanceFeedbackInput { + fn from(value: RelevanceFeedbackInputBuilder) -> Self { + value.build() + } +} diff --git a/src/builders/rrf_builder.rs b/src/builders/rrf_builder.rs index 94f9a555..3dd49a45 100644 --- a/src/builders/rrf_builder.rs +++ b/src/builders/rrf_builder.rs @@ -9,6 +9,10 @@ pub struct RrfBuilder { /// /// Default value is 60. pub(crate) k: Option>, + /// Weights for each prefetch source. + /// Higher weight gives more influence on the final ranking. + /// If not specified, all prefetches are weighted equally. + pub(crate) weights: Option>, } impl RrfBuilder { @@ -54,9 +58,19 @@ impl RrfBuilder { new } + /// Weights for each prefetch source. + /// Higher weight gives more influence on the final ranking. + /// If not specified, all prefetches are weighted equally. 
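Taken together, the three new feedback builders compose as below; the values mirror the doc tests above. How the finished `RelevanceFeedbackInput` is attached to a query is not shown in this diff, so the sketch stops at construction:

use qdrant_client::qdrant::{
    FeedbackItemBuilder, FeedbackStrategyBuilder, PointId, RelevanceFeedbackInputBuilder,
    VectorInput,
};

// Re-rank around the original query vector, steered by two scored examples:
// point 42 was judged relevant (0.9), point 7 largely irrelevant (0.1).
let input = RelevanceFeedbackInputBuilder::new(VectorInput::new_dense(vec![0.1, 0.2, 0.3]))
    .add_feedback(FeedbackItemBuilder::new(VectorInput::new_id(PointId::from(42)), 0.9))
    .add_feedback(FeedbackItemBuilder::new(VectorInput::new_id(PointId::from(7)), 0.1))
    .strategy(FeedbackStrategyBuilder::naive(1.0, 1.0, 1.0))
    .build();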
+ pub fn weights(self, value: Vec) -> Self { + let mut new = self; + new.weights = Option::Some(value); + new + } + pub fn build(self) -> Rrf { Rrf { k: self.k.unwrap_or_default(), + weights: self.weights.unwrap_or_default(), } } @@ -64,6 +78,7 @@ impl RrfBuilder { fn create_empty() -> Self { Self { k: core::default::Default::default(), + weights: core::default::Default::default(), } } } diff --git a/src/builders/set_payload_points_builder.rs b/src/builders/set_payload_points_builder.rs index 2e270fc7..9e5f9e94 100644 --- a/src/builders/set_payload_points_builder.rs +++ b/src/builders/set_payload_points_builder.rs @@ -17,6 +17,8 @@ pub struct SetPayloadPointsBuilder { pub(crate) shard_key_selector: Option>, /// Option for indicate property of payload pub(crate) key: Option>, + /// Timeout for the request in seconds + pub(crate) timeout: Option>, } impl SetPayloadPointsBuilder { @@ -68,6 +70,12 @@ impl SetPayloadPointsBuilder { new.key = Option::Some(Option::Some(value.into())); new } + /// Timeout for the request in seconds + pub fn timeout(self, value: u64) -> Self { + let mut new = self; + new.timeout = Option::Some(Option::Some(value)); + new + } fn build_inner(self) -> Result { Ok(SetPayloadPoints { @@ -92,6 +100,7 @@ impl SetPayloadPointsBuilder { ordering: self.ordering.unwrap_or_default(), shard_key_selector: self.shard_key_selector.unwrap_or_default(), key: self.key.unwrap_or_default(), + timeout: self.timeout.unwrap_or_default(), }) } /// Create an empty builder, with all fields set to `None` or `PhantomData`. @@ -104,6 +113,7 @@ impl SetPayloadPointsBuilder { ordering: core::default::Default::default(), shard_key_selector: core::default::Default::default(), key: core::default::Default::default(), + timeout: core::default::Default::default(), } } } diff --git a/src/builders/text_index_params_builder.rs b/src/builders/text_index_params_builder.rs index 0dca9ece..5c6286ec 100644 --- a/src/builders/text_index_params_builder.rs +++ b/src/builders/text_index_params_builder.rs @@ -19,6 +19,8 @@ pub struct TextIndexParamsBuilder { pub(crate) stemmer: Option>, /// If true, normalize tokens by folding accented characters to ASCII (e.g., "ação" -> "acao"). Default: false. pub(crate) ascii_folding: Option>, + /// If true - enable HNSW index for this field. + pub(crate) enable_hnsw: Option>, } impl TextIndexParamsBuilder { @@ -113,6 +115,13 @@ impl TextIndexParamsBuilder { new } + /// If true - enable HNSW index for this field. 
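A sketch of the new `weights` setter. Only `weights` is shown in this hunk; the `Default` impl and the `k` setter are assumed to exist alongside it, matching the other generated builders:

use qdrant_client::qdrant::RrfBuilder;

// Two prefetch sources: the first gets twice the influence of the second.
let rrf = RrfBuilder::default() // Default impl assumed, as on the other builders
    .k(60) // restates the documented default; setter assumed
    .weights(vec![2.0, 1.0]) // new in this change, positional per prefetch
    .build();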
+ pub fn enable_hnsw(self, value: bool) -> Self { + let mut new = self; + new.enable_hnsw = Option::Some(Option::Some(value)); + new + } + fn build_inner(self) -> Result { Ok(TextIndexParams { tokenizer: match self.tokenizer { @@ -131,6 +140,7 @@ impl TextIndexParamsBuilder { phrase_matching: self.phrase_matching.unwrap_or_default(), stemmer: self.stemmer.unwrap_or_default(), ascii_folding: self.ascii_folding.unwrap_or_default(), + enable_hnsw: self.enable_hnsw.unwrap_or_default(), }) } @@ -146,6 +156,7 @@ impl TextIndexParamsBuilder { phrase_matching: Default::default(), stemmer: Default::default(), ascii_folding: Default::default(), + enable_hnsw: Default::default(), } } } diff --git a/src/builders/update_batch_points_builder.rs b/src/builders/update_batch_points_builder.rs index a789f511..325abccf 100644 --- a/src/builders/update_batch_points_builder.rs +++ b/src/builders/update_batch_points_builder.rs @@ -9,6 +9,8 @@ pub struct UpdateBatchPointsBuilder { pub(crate) operations: Option>, /// Write ordering guarantees pub(crate) ordering: Option>, + /// Timeout for the request in seconds + pub(crate) timeout: Option>, } impl UpdateBatchPointsBuilder { @@ -35,6 +37,12 @@ impl UpdateBatchPointsBuilder { new.ordering = Option::Some(Option::Some(value.into())); new } + /// Timeout for the request in seconds + pub fn timeout(self, value: u64) -> Self { + let mut new = self; + new.timeout = Option::Some(Option::Some(value)); + new + } fn build_inner(self) -> Result { Ok(UpdateBatchPoints { @@ -56,6 +64,7 @@ impl UpdateBatchPointsBuilder { } }, ordering: self.ordering.unwrap_or_default(), + timeout: self.timeout.unwrap_or_default(), }) } /// Create an empty builder, with all fields set to `None` or `PhantomData`. @@ -65,6 +74,7 @@ impl UpdateBatchPointsBuilder { wait: core::default::Default::default(), operations: core::default::Default::default(), ordering: core::default::Default::default(), + timeout: core::default::Default::default(), } } } diff --git a/src/builders/update_point_vectors_builder.rs b/src/builders/update_point_vectors_builder.rs index fc7246f8..14e3c646 100644 --- a/src/builders/update_point_vectors_builder.rs +++ b/src/builders/update_point_vectors_builder.rs @@ -14,6 +14,8 @@ pub struct UpdatePointVectorsBuilder { pub(crate) shard_key_selector: Option>, /// Optional filter to apply to the update operation. If set, only points matching the filter will be updated. pub(crate) update_filter: Option>, + /// Timeout for the request in seconds + pub(crate) timeout: Option>, } impl UpdatePointVectorsBuilder { @@ -56,6 +58,12 @@ impl UpdatePointVectorsBuilder { new.update_filter = Option::Some(Option::Some(value.into())); new } + /// Timeout for the request in seconds + pub fn timeout(self, value: u64) -> Self { + let mut new = self; + new.timeout = Option::Some(Option::Some(value)); + new + } fn build_inner(self) -> Result { Ok(UpdatePointVectors { @@ -79,6 +87,7 @@ impl UpdatePointVectorsBuilder { ordering: self.ordering.unwrap_or_default(), shard_key_selector: self.shard_key_selector.unwrap_or_default(), update_filter: self.update_filter.unwrap_or_default(), + timeout: self.timeout.unwrap_or_default(), }) } /// Create an empty builder, with all fields set to `None` or `PhantomData`. 
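The upsert builder gains both new knobs at once. A sketch, assuming `UpdateMode` exposes variants matching the documented modes (`InsertOnly`, `Upsert`, `UpdateOnly`; the names are inferred from the doc comment, not shown in this diff) and the `serde` feature for the payload conversion:

use qdrant_client::Payload;
use qdrant_client::qdrant::{PointStruct, UpdateMode, UpsertPointsBuilder};
use serde_json::json;

let payload: Payload = json!({ "color": "red" }).try_into().unwrap(); // needs the `serde` feature

// Only touch points that already exist, and give the server 30 seconds.
let request =
    UpsertPointsBuilder::new("my_collection", vec![PointStruct::new(1, vec![0.05_f32; 4], payload)])
        .update_mode(UpdateMode::UpdateOnly) // variant name inferred from the doc comment
        .timeout(30) // new in this change: request timeout in seconds
        .wait(true)
        .build();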
@@ -90,6 +99,7 @@ impl UpdatePointVectorsBuilder { ordering: core::default::Default::default(), shard_key_selector: core::default::Default::default(), update_filter: core::default::Default::default(), + timeout: core::default::Default::default(), } } } diff --git a/src/builders/upsert_points_builder.rs b/src/builders/upsert_points_builder.rs index 5d4965dd..f5f01039 100644 --- a/src/builders/upsert_points_builder.rs +++ b/src/builders/upsert_points_builder.rs @@ -13,6 +13,10 @@ pub struct UpsertPointsBuilder { pub(crate) shard_key_selector: Option>, /// Optional filter to apply to the upsert operation. If set, only points matching the filter will be updated, others will be inserted. pub(crate) update_filter: Option>, + /// Timeout for the request in seconds + pub(crate) timeout: Option>, + /// Mode of the upsert operation: insert_only, upsert (default), update_only + pub(crate) update_mode: Option>, } impl UpsertPointsBuilder { @@ -54,6 +58,18 @@ impl UpsertPointsBuilder { new.update_filter = Option::Some(Option::Some(value.into())); new } + /// Timeout for the request in seconds + pub fn timeout(self, value: u64) -> Self { + let mut new = self; + new.timeout = Option::Some(Option::Some(value)); + new + } + /// Mode of the upsert operation: insert_only, upsert (default), update_only + pub fn update_mode(self, value: UpdateMode) -> Self { + let mut new = self; + new.update_mode = Option::Some(Option::Some(value.into())); + new + } fn build_inner(self) -> Result { Ok(UpsertPoints { @@ -77,6 +93,8 @@ impl UpsertPointsBuilder { ordering: self.ordering.unwrap_or_default(), shard_key_selector: self.shard_key_selector.unwrap_or_default(), update_filter: self.update_filter.unwrap_or_default(), + timeout: self.timeout.unwrap_or_default(), + update_mode: self.update_mode.unwrap_or_default(), }) } /// Create an empty builder, with all fields set to `None` or `PhantomData`. @@ -88,6 +106,8 @@ impl UpsertPointsBuilder { ordering: core::default::Default::default(), shard_key_selector: core::default::Default::default(), update_filter: core::default::Default::default(), + timeout: core::default::Default::default(), + update_mode: core::default::Default::default(), } } } diff --git a/src/builders/uuid_index_params_builder.rs b/src/builders/uuid_index_params_builder.rs index 4a5de683..a8967fbd 100644 --- a/src/builders/uuid_index_params_builder.rs +++ b/src/builders/uuid_index_params_builder.rs @@ -6,6 +6,8 @@ pub struct UuidIndexParamsBuilder { pub(crate) is_tenant: Option>, /// If true - store index on disk. pub(crate) on_disk: Option>, + /// If true - enable HNSW index for this field. + pub(crate) enable_hnsw: Option>, } impl UuidIndexParamsBuilder { @@ -21,11 +23,18 @@ impl UuidIndexParamsBuilder { new.on_disk = Option::Some(Option::Some(value)); new } + /// If true - enable HNSW index for this field. + pub fn enable_hnsw(self, value: bool) -> Self { + let mut new = self; + new.enable_hnsw = Option::Some(Option::Some(value)); + new + } fn build_inner(self) -> Result { Ok(UuidIndexParams { is_tenant: self.is_tenant.unwrap_or_default(), on_disk: self.on_disk.unwrap_or_default(), + enable_hnsw: self.enable_hnsw.unwrap_or_default(), }) } /// Create an empty builder, with all fields set to `None` or `PhantomData`. 
@@ -33,6 +42,7 @@ impl UuidIndexParamsBuilder { Self { is_tenant: core::default::Default::default(), on_disk: core::default::Default::default(), + enable_hnsw: core::default::Default::default(), } } } diff --git a/src/client/collection.rs b/src/client/collection.rs deleted file mode 100644 index fb369181..00000000 --- a/src/client/collection.rs +++ /dev/null @@ -1,429 +0,0 @@ -use std::future::Future; - -use tonic::codegen::InterceptedService; -use tonic::transport::Channel; -use tonic::Status; - -use crate::auth::TokenInterceptor; -use crate::client::QdrantClient; -use crate::qdrant::alias_operations::Action; -use crate::qdrant::collections_client::CollectionsClient; -use crate::qdrant::update_collection_cluster_setup_request::Operation; -use crate::qdrant::{ - shard_key, AliasOperations, ChangeAliases, CollectionClusterInfoRequest, - CollectionClusterInfoResponse, CollectionExistsRequest, CollectionOperationResponse, - CollectionParamsDiff, CreateAlias, CreateCollection, CreateShardKey, CreateShardKeyRequest, - CreateShardKeyResponse, DeleteAlias, DeleteCollection, DeleteShardKey, DeleteShardKeyRequest, - DeleteShardKeyResponse, GetCollectionInfoRequest, GetCollectionInfoResponse, HnswConfigDiff, - ListAliasesRequest, ListAliasesResponse, ListCollectionAliasesRequest, ListCollectionsRequest, - ListCollectionsResponse, OptimizersConfigDiff, QuantizationConfigDiff, RenameAlias, ShardKey, - SparseVectorConfig, UpdateCollection, UpdateCollectionClusterSetupRequest, - UpdateCollectionClusterSetupResponse, VectorsConfigDiff, -}; - -impl QdrantClient { - // Access to raw collection API - pub async fn with_collections_client>>( - &self, - f: impl Fn(CollectionsClient>) -> O, - ) -> anyhow::Result { - self.channel - .with_channel( - |channel| { - let service = self.with_api_key(channel); - let mut client = - CollectionsClient::new(service).max_decoding_message_size(usize::MAX); - if let Some(compression) = self.cfg.compression { - client = client - .send_compressed(compression.into()) - .accept_compressed(compression.into()); - } - f(client) - }, - false, - ) - .await - } - - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::list_collections` instead" - )] - pub async fn list_collections(&self) -> anyhow::Result { - Ok(self - .with_collections_client(|mut collection_api| async move { - let result = collection_api.list(ListCollectionsRequest {}).await?; - Ok(result.into_inner()) - }) - .await?) - } - - #[deprecated( - since = "1.8.0", - note = "use new `qdrant_client::Qdrant::collection_exists` instead" - )] - pub async fn has_collection(&self, collection_name: impl ToString) -> anyhow::Result { - let collection_name = collection_name.to_string(); - let response = self.list_collections().await?; - let result = response - .collections - .into_iter() - .any(|c| c.name == collection_name); - - Ok(result) - } - - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::collection_exists` instead" - )] - pub async fn collection_exists(&self, collection_name: impl ToString) -> anyhow::Result { - let collection_name_ref = &collection_name.to_string(); - Ok(self - .with_collections_client(|mut collection_api| async move { - let request = CollectionExistsRequest { - collection_name: collection_name_ref.clone(), - }; - let result = collection_api.collection_exists(request).await?; - Ok(result - .into_inner() - .result - .map(|r| r.exists) - .unwrap_or(false)) - }) - .await?) 
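For reference, the replacement calls on the new client are one-liners; a minimal sketch against the current `Qdrant` API:

use qdrant_client::Qdrant;

async fn check(client: &Qdrant) -> anyhow::Result<()> {
    // Replaces the deprecated list_collections / has_collection /
    // collection_exists shown above.
    let collections = client.list_collections().await?;
    println!("{} collections", collections.collections.len());
    let exists = client.collection_exists("my_collection").await?;
    println!("exists: {exists}");
    Ok(())
}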
- } - - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::create_collection` instead" - )] - pub async fn create_collection( - &self, - details: &CreateCollection, - ) -> anyhow::Result { - Ok(self - .with_collections_client(|mut collection_api| async move { - let result = collection_api.create(details.clone()).await?; - Ok(result.into_inner()) - }) - .await?) - } - - #[allow(clippy::too_many_arguments)] - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::update_collection` instead" - )] - pub async fn update_collection( - &self, - collection_name: impl ToString, - optimizers_config: Option<&OptimizersConfigDiff>, - params: Option<&CollectionParamsDiff>, - sparse_vectors_config: Option<&SparseVectorConfig>, - hnsw_config: Option<&HnswConfigDiff>, - vectors_config: Option<&VectorsConfigDiff>, - quantization_config: Option<&QuantizationConfigDiff>, - ) -> anyhow::Result { - let collection_name = collection_name.to_string(); - let collection_name_ref = collection_name.as_str(); - - Ok(self - .with_collections_client(|mut collection_api| async move { - let result = collection_api - .update(UpdateCollection { - collection_name: collection_name_ref.to_string(), - optimizers_config: optimizers_config.cloned(), - timeout: None, - params: params.cloned(), - sparse_vectors_config: sparse_vectors_config.cloned(), - hnsw_config: hnsw_config.cloned(), - vectors_config: vectors_config.cloned(), - quantization_config: quantization_config.cloned(), - strict_mode_config: None, - metadata: Default::default(), - }) - .await?; - - Ok(result.into_inner()) - }) - .await?) - } - - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::delete_collection` instead" - )] - pub async fn delete_collection( - &self, - collection_name: impl ToString, - ) -> anyhow::Result { - let collection_name = collection_name.to_string(); - let collection_name_ref = collection_name.as_str(); - - Ok(self - .with_collections_client(|mut collection_api| async move { - let result = collection_api - .delete(DeleteCollection { - collection_name: collection_name_ref.to_string(), - ..Default::default() - }) - .await?; - Ok(result.into_inner()) - }) - .await?) - } - - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::collection_info` instead" - )] - pub async fn collection_info( - &self, - collection_name: impl ToString, - ) -> anyhow::Result { - let collection_name = collection_name.to_string(); - let collection_name_ref = collection_name.as_str(); - - Ok(self - .with_collections_client(|mut collection_api| async move { - let result = collection_api - .get(GetCollectionInfoRequest { - collection_name: collection_name_ref.to_string(), - }) - .await?; - Ok(result.into_inner()) - }) - .await?) 
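The equivalents on the new client, sketched with the current builders; note the update path can also carry the `prevent_unoptimized` flag added earlier in this change (the `Default` impl on `OptimizersConfigDiffBuilder` is assumed, as on the other generated builders):

use qdrant_client::Qdrant;
use qdrant_client::qdrant::{
    CreateCollectionBuilder, Distance, OptimizersConfigDiffBuilder, UpdateCollectionBuilder,
    VectorParamsBuilder,
};

async fn migrate(client: &Qdrant) -> anyhow::Result<()> {
    // Replaces the deprecated create_collection with its raw CreateCollection struct.
    client
        .create_collection(
            CreateCollectionBuilder::new("my_collection")
                .vectors_config(VectorParamsBuilder::new(10, Distance::Cosine)),
        )
        .await?;

    // Replaces the deprecated seven-argument update_collection.
    client
        .update_collection(
            UpdateCollectionBuilder::new("my_collection").optimizers_config(
                OptimizersConfigDiffBuilder::default().prevent_unoptimized(true),
            ),
        )
        .await?;
    Ok(())
}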
- } - - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::create_alias` instead" - )] - pub async fn create_alias( - &self, - collection_name: impl ToString, - alias_name: impl ToString, - ) -> anyhow::Result { - let create_alias = CreateAlias { - collection_name: collection_name.to_string(), - alias_name: alias_name.to_string(), - }; - let change_aliases = ChangeAliases { - actions: vec![AliasOperations { - action: Some(Action::CreateAlias(create_alias)), - }], - timeout: None, - }; - self.update_aliases(change_aliases).await - } - - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::delete_alias` instead" - )] - pub async fn delete_alias( - &self, - alias_name: impl ToString, - ) -> anyhow::Result { - let delete_alias = DeleteAlias { - alias_name: alias_name.to_string(), - }; - let change_aliases = ChangeAliases { - actions: vec![AliasOperations { - action: Some(Action::DeleteAlias(delete_alias)), - }], - timeout: None, - }; - self.update_aliases(change_aliases).await - } - - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::rename_alias` instead" - )] - pub async fn rename_alias( - &self, - old_alias_name: impl ToString, - new_alias_name: impl ToString, - ) -> anyhow::Result { - let rename_alias = RenameAlias { - old_alias_name: old_alias_name.to_string(), - new_alias_name: new_alias_name.to_string(), - }; - let change_aliases = ChangeAliases { - actions: vec![AliasOperations { - action: Some(Action::RenameAlias(rename_alias)), - }], - timeout: None, - }; - self.update_aliases(change_aliases).await - } - - // lower level API - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::create_alias`, `qdrant_client::Qdrant::rename_alias` or `qdrant_client::Qdrant::delete_alias` instead" - )] - pub async fn update_aliases( - &self, - change_aliases: ChangeAliases, - ) -> anyhow::Result { - let change_aliases = change_aliases.clone(); - let chang_aliases_ref = &change_aliases; - Ok(self - .with_collections_client(|mut collection_api| async move { - let result = collection_api - .update_aliases(chang_aliases_ref.clone()) - .await?; - Ok(result.into_inner()) - }) - .await?) - } - - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::list_collection_aliases` instead" - )] - pub async fn list_collection_aliases( - &self, - collection_name: impl ToString, - ) -> anyhow::Result { - let collection_name = collection_name.to_string(); - let collection_name_ref = collection_name.as_str(); - Ok(self - .with_collections_client(|mut collection_api| async move { - let result = collection_api - .list_collection_aliases(ListCollectionAliasesRequest { - collection_name: collection_name_ref.to_string(), - }) - .await?; - Ok(result.into_inner()) - }) - .await?) - } - - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::list_aliases` instead" - )] - pub async fn list_aliases(&self) -> anyhow::Result { - Ok(self - .with_collections_client(|mut collection_api| async move { - let result = collection_api.list_aliases(ListAliasesRequest {}).await?; - Ok(result.into_inner()) - }) - .await?) 
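Alias management on the new client collapses each of the deleted helpers into a single call; a sketch:

use qdrant_client::Qdrant;
use qdrant_client::qdrant::CreateAliasBuilder;

async fn aliases(client: &Qdrant) -> anyhow::Result<()> {
    // Replaces the deprecated create_alias / update_aliases round-trip.
    client
        .create_alias(CreateAliasBuilder::new("my_collection", "my_alias"))
        .await?;
    // Replaces the deprecated delete_alias.
    client.delete_alias("my_alias").await?;
    Ok(())
}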
- } - - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::collection_cluster_info` instead" - )] - pub async fn collection_cluster_info( - &self, - collection_name: impl ToString, - ) -> anyhow::Result { - let collection_name = collection_name.to_string(); - let collection_name_ref = collection_name.as_str(); - - Ok(self - .with_collections_client(|mut collection_api| async move { - let request = CollectionClusterInfoRequest { - collection_name: collection_name_ref.to_string(), - }; - let result = collection_api.collection_cluster_info(request).await?; - Ok(result.into_inner()) - }) - .await?) - } - - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::create_shard_key` instead" - )] - pub async fn create_shard_key( - &self, - collection_name: impl AsRef, - shard_key: &shard_key::Key, - shards_number: Option, - replication_factor: Option, - placement: &[u64], - ) -> anyhow::Result { - let collection_name = collection_name.as_ref(); - - Ok(self - .with_collections_client(|mut collection_api| async move { - let result = collection_api - .create_shard_key(CreateShardKeyRequest { - collection_name: collection_name.to_string(), - request: Some(CreateShardKey { - shard_key: Some(ShardKey::from(shard_key.clone())), - shards_number, - replication_factor, - placement: placement.to_vec(), - initial_state: None, - }), - timeout: None, - }) - .await?; - Ok(result.into_inner()) - }) - .await?) - } - - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::create_shard_key` instead" - )] - pub async fn delete_shard_key( - &self, - collection_name: impl AsRef, - shard_key: &shard_key::Key, - ) -> anyhow::Result { - let collection_name = collection_name.as_ref(); - - Ok(self - .with_collections_client(|mut collection_api| async move { - let result = collection_api - .delete_shard_key(DeleteShardKeyRequest { - collection_name: collection_name.to_string(), - request: Some(DeleteShardKey { - shard_key: Some(ShardKey::from(shard_key.clone())), - }), - timeout: None, - }) - .await?; - Ok(result.into_inner()) - }) - .await?) - } - - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::update_collection_cluster_setup` instead" - )] - pub async fn update_collection_cluster_setup( - &self, - collection_name: impl ToString, - operation: Operation, - ) -> anyhow::Result { - let collection_name = collection_name.to_string(); - let collection_name_ref = collection_name.as_str(); - let operation_ref = &operation; - Ok(self - .with_collections_client(|mut collection_api| async move { - let request = UpdateCollectionClusterSetupRequest { - collection_name: collection_name_ref.to_string(), - timeout: None, - operation: Some(operation_ref.clone()), - }; - let result = collection_api - .update_collection_cluster_setup(request) - .await?; - Ok(result.into_inner()) - }) - .await?) 
- } -} diff --git a/src/client/config.rs b/src/client/config.rs deleted file mode 100644 index ad3fcd8c..00000000 --- a/src/client/config.rs +++ /dev/null @@ -1,212 +0,0 @@ -#![allow(deprecated)] - -use std::time::Duration; - -use crate::client::QdrantClient; - -#[deprecated( - since = "1.10.0", - note = "use new config at `qdrant_client::config::QdrantConfig` instead" -)] -pub struct QdrantClientConfig { - pub uri: String, - pub timeout: Duration, - pub connect_timeout: Duration, - pub keep_alive_while_idle: bool, - pub api_key: Option, - pub compression: Option, -} - -impl QdrantClientConfig { - pub fn from_url(url: &str) -> Self { - QdrantClientConfig { - uri: url.to_string(), - ..Self::default() - } - } - - /// Sets the API key or token - pub fn set_api_key(&mut self, api_key: &str) { - self.api_key = Some(api_key.to_string()); - } - - pub fn set_timeout(&mut self, timeout: Duration) { - self.timeout = timeout; - } - - pub fn set_connect_timeout(&mut self, connect_timeout: Duration) { - self.connect_timeout = connect_timeout; - } - - pub fn set_keep_alive_while_idle(&mut self, keep_alive_while_idle: bool) { - self.keep_alive_while_idle = keep_alive_while_idle; - } - - pub fn set_compression(&mut self, compression: Option) { - self.compression = compression; - } - - /// set the API key, builder-like. The API key argument can be any of - /// `&str`, `String`, `Option<&str>`, `Option` or `Result`. - /// - /// # Examples: - /// - /// A typical use case might be getting the key from an env var: - /// ```rust, no_run - /// use qdrant_client::prelude::*; - /// - /// let client = QdrantClient::from_url("localhost:6334") - /// .with_api_key(std::env::var("QDRANT_API_KEY")) - /// .build(); - /// ``` - /// Another possibility might be getting it out of some config - /// ```rust, no_run - /// use qdrant_client::prelude::*; - ///# use std::collections::HashMap; - ///# let config: HashMap<&str, String> = HashMap::new(); - /// let client = QdrantClientConfig::from_url("localhost:6334") - /// .with_api_key(config.get("api_key")) - /// .build(); - /// ``` - #[deprecated( - since = "1.10.0", - note = "use `qdrant_client::config::QdrantConfig::api_key` instead" - )] - pub fn with_api_key(mut self, api_key: impl MaybeApiKey) -> Self { - self.api_key = api_key.maybe_key(); - self - } - - /// Configure the service to keep the connection alive while idle - pub fn keep_alive_while_idle(mut self) -> Self { - self.keep_alive_while_idle = true; - self - } - - /// Set the timeout for this client - #[deprecated( - since = "1.10.0", - note = "use `qdrant_client::config::QdrantConfig::timeout` instead" - )] - pub fn with_timeout(mut self, timeout: impl AsTimeout) -> Self { - self.timeout = timeout.timeout(); - self - } - - /// Set the connect timeout for this client - #[deprecated( - since = "1.10.0", - note = "use `qdrant_client::config::QdrantConfig::connect_timeout` instead" - )] - pub fn with_connect_timeout(mut self, timeout: impl AsTimeout) -> Self { - self.connect_timeout = timeout.timeout(); - self - } - - /// Set the compression to use for this client - #[deprecated( - since = "1.10.0", - note = "use `qdrant_client::config::QdrantConfig::compression` instead" - )] - pub fn with_compression(mut self, compression: Option) -> Self { - self.compression = compression; - self - } - - /// Build the QdrantClient - pub fn build(self) -> anyhow::Result { - QdrantClient::new(Some(self)) - } -} - -impl Default for QdrantClientConfig { - fn default() -> Self { - Self { - uri: String::from("http://localhost:6334"), - 
timeout: Duration::from_secs(5), - connect_timeout: Duration::from_secs(5), - keep_alive_while_idle: true, - api_key: None, - compression: None, - } - } -} - -/// The type of compression to use for requests. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum CompressionEncoding { - Gzip, -} - -impl From for tonic::codec::CompressionEncoding { - fn from(encoding: CompressionEncoding) -> Self { - match encoding { - CompressionEncoding::Gzip => tonic::codec::CompressionEncoding::Gzip, - } - } -} - -#[deprecated( - since = "1.10.0", - note = "use `qdrant_client::config::AsTimeout` instead" -)] -pub trait AsTimeout { - fn timeout(self) -> Duration; -} - -impl AsTimeout for Duration { - fn timeout(self) -> Duration { - self - } -} - -impl AsTimeout for u64 { - fn timeout(self) -> Duration { - Duration::from_secs(self) - } -} - -/// Helper type to allow setting an API key from various types -#[deprecated( - since = "1.10.0", - note = "use `qdrant_client::config::AsOptionApiKey` instead" -)] -pub trait MaybeApiKey { - fn maybe_key(self) -> Option; -} - -impl MaybeApiKey for &str { - fn maybe_key(self) -> Option { - Some(self.to_string()) - } -} - -impl MaybeApiKey for String { - fn maybe_key(self) -> Option { - Some(self) - } -} - -impl MaybeApiKey for Option { - fn maybe_key(self) -> Option { - self - } -} - -impl MaybeApiKey for Option<&String> { - fn maybe_key(self) -> Option { - self.map(ToOwned::to_owned) - } -} - -impl MaybeApiKey for Option<&str> { - fn maybe_key(self) -> Option { - self.map(ToOwned::to_owned) - } -} - -impl MaybeApiKey for Result { - fn maybe_key(self) -> Option { - self.ok() - } -} diff --git a/src/client/mod.rs b/src/client/mod.rs deleted file mode 100644 index eeb45b38..00000000 --- a/src/client/mod.rs +++ /dev/null @@ -1,117 +0,0 @@ -#![allow(deprecated)] - -pub mod collection; -#[deprecated( - since = "1.10.0", - note = "use new config types at `qdrant_client::config` instead" -)] -mod config; -pub mod points; -pub mod snapshot; - -use std::future::Future; - -use anyhow::Result; -pub use config::{AsTimeout, CompressionEncoding, MaybeApiKey, QdrantClientConfig}; -use tonic::codegen::InterceptedService; -use tonic::transport::{Channel, Uri}; -use tonic::Status; - -pub use crate::auth::TokenInterceptor; -use crate::channel_pool::ChannelPool; -pub use crate::payload::Payload; -use crate::qdrant::{qdrant_client, HealthCheckReply, HealthCheckRequest}; - -/// A builder for `QdrantClient`s -#[deprecated(since = "1.10.0", note = "use `qdrant_client::QdrantBuilder` instead")] -pub type QdrantClientBuilder = QdrantClientConfig; - -/// Deprecated Qdrant client -/// -/// # Deprecated -/// -/// This client is deprecated. -/// -/// Please switch to the new [`Qdrant`](crate::Qdrant) client. It is easier to use and provides a -/// more robust interface. -/// -/// See examples at the [crate root](crate) or at each individual [`Qdrant`](crate::Qdrant) -/// operation. 
-#[deprecated(since = "1.10.0", note = "use `qdrant_client::Qdrant` instead")] -pub struct QdrantClient { - pub channel: ChannelPool, - pub cfg: QdrantClientConfig, -} - -impl QdrantClient { - /// Create a builder to setup the client - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::from_url` instead" - )] - pub fn from_url(url: &str) -> QdrantClientBuilder { - QdrantClientBuilder::from_url(url) - } - - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::new` instead" - )] - pub fn new(cfg: Option) -> Result { - let cfg = cfg.unwrap_or_default(); - - let channel = ChannelPool::new( - cfg.uri.parse::()?, - cfg.timeout, - cfg.connect_timeout, - cfg.keep_alive_while_idle, - 1, - ); - - let client = Self { channel, cfg }; - - Ok(client) - } - - /// Wraps a channel with a token interceptor - fn with_api_key(&self, channel: Channel) -> InterceptedService { - let interceptor = TokenInterceptor::new(self.cfg.api_key.clone()); - InterceptedService::new(channel, interceptor) - } - - // Access to raw root qdrant API - pub async fn with_root_qdrant_client>>( - &self, - f: impl Fn(qdrant_client::QdrantClient>) -> O, - ) -> Result { - self.channel - .with_channel( - |channel| { - let service = self.with_api_key(channel); - let mut client = qdrant_client::QdrantClient::new(service) - .max_decoding_message_size(usize::MAX); - if let Some(compression) = self.cfg.compression { - client = client - .send_compressed(compression.into()) - .accept_compressed(compression.into()); - } - f(client) - }, - true, - ) - .await - } - - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::health_check` instead" - )] - pub async fn health_check(&self) -> Result { - Ok(self - .with_root_qdrant_client(|mut qdrant_api| async move { - let result = qdrant_api.health_check(HealthCheckRequest {}).await?; - Ok(result.into_inner()) - }) - .await?) 
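Construction and health checking on the replacement client, for comparison; a minimal sketch:

use qdrant_client::Qdrant;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Replaces QdrantClient::new(Some(QdrantClientConfig::from_url(..))).
    let client = Qdrant::from_url("http://localhost:6334").build()?;
    let reply = client.health_check().await?;
    println!("{reply:?}");
    Ok(())
}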
- } -} diff --git a/src/client/points.rs b/src/client/points.rs deleted file mode 100644 index d517bba7..00000000 --- a/src/client/points.rs +++ /dev/null @@ -1,1243 +0,0 @@ -use std::future::Future; - -use tonic::codegen::InterceptedService; -use tonic::transport::Channel; -use tonic::Status; - -use crate::auth::TokenInterceptor; -use crate::client::{Payload, QdrantClient}; -use crate::prelude::{PointStruct, SearchPoints}; -use crate::qdrant::points_client::PointsClient; -use crate::qdrant::{ - shard_key, ClearPayloadPoints, CountPoints, CountResponse, CreateFieldIndexCollection, - DeleteFieldIndexCollection, DeletePayloadPoints, DeletePointVectors, DeletePoints, - DiscoverBatchPoints, DiscoverBatchResponse, DiscoverPoints, DiscoverResponse, FieldType, - GetPoints, GetResponse, PayloadIndexParams, PointId, PointVectors, PointsOperationResponse, - PointsSelector, PointsUpdateOperation, ReadConsistency, RecommendBatchPoints, - RecommendBatchResponse, RecommendGroupsResponse, RecommendPointGroups, RecommendPoints, - RecommendResponse, ScrollPoints, ScrollResponse, SearchBatchPoints, SearchBatchResponse, - SearchGroupsResponse, SearchPointGroups, SearchResponse, SetPayloadPoints, ShardKeySelector, - UpdateBatchPoints, UpdateBatchResponse, UpdatePointVectors, UpsertPoints, Usage, - VectorsSelector, WithPayloadSelector, WithVectorsSelector, WriteOrdering, -}; - -impl QdrantClient { - // Access to raw points API - pub async fn with_points_client>>( - &self, - f: impl Fn(PointsClient>) -> O, - ) -> anyhow::Result { - self.channel - .with_channel( - |channel| { - let service = self.with_api_key(channel); - let mut client = - PointsClient::new(service).max_decoding_message_size(usize::MAX); - if let Some(compression) = self.cfg.compression { - client = client - .send_compressed(compression.into()) - .accept_compressed(compression.into()); - } - f(client) - }, - true, - ) - .await - } - - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::update_points_batch` instead" - )] - async fn _batch_updates( - &self, - collection_name: impl ToString, - operations: &[PointsUpdateOperation], - ordering: Option, - wait: bool, - ) -> anyhow::Result { - let collection_name = collection_name.to_string(); - let collection_name_ref = collection_name.as_str(); - let ordering_ref = ordering.as_ref(); - Ok(self - .with_points_client(|mut points_api| async move { - Ok(points_api - .update_batch(UpdateBatchPoints { - collection_name: collection_name_ref.to_string(), - wait: Some(wait), - operations: operations.to_vec(), - ordering: ordering_ref.cloned(), - }) - .await? - .into_inner()) - }) - .await?) - } - - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::update_points_batch` instead" - )] - pub async fn batch_updates( - &self, - collection_name: impl ToString, - operations: &[PointsUpdateOperation], - ordering: Option, - ) -> anyhow::Result { - self._batch_updates(collection_name, operations, ordering, false) - .await - } - - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::update_points_batch` instead" - )] - pub async fn batch_updates_blocking( - &self, - collection_name: impl ToString, - operations: &[PointsUpdateOperation], - ordering: Option, - ) -> anyhow::Result { - self._batch_updates(collection_name, operations, ordering, true) - .await - } - - /// Insert or update points into the collection. - /// If points with given ID already exist, they will be overwritten. 
- /// This method does *not* wait for completion of the operation, use - /// [`upsert_points_blocking`](Self::upsert_points_blocking) for that. - /// Also this method does not split the points to insert to avoid timeouts, - /// look at [`upsert_points_batch`](Self::upsert_points_batch) for that. - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::upsert_points` instead" - )] - pub async fn upsert_points( - &self, - collection_name: impl ToString, - shard_key_selector: Option>, - points: Vec, - ordering: Option, - ) -> anyhow::Result { - self._upsert_points( - collection_name, - shard_key_selector, - &points, - false, - ordering, - ) - .await - } - - /// Insert or update points into the collection, wait for completion. - /// If points with given ID already exist, they will be overwritten. - /// This method does not split the points to insert to avoid timeouts, - /// look at [`upsert_points_batch`](Self::upsert_points_batch) for that. - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::upsert_points` instead" - )] - pub async fn upsert_points_blocking( - &self, - collection_name: impl ToString, - shard_key_selector: Option>, - points: Vec, - ordering: Option, - ) -> anyhow::Result { - self._upsert_points(collection_name, shard_key_selector, &points, true, ordering) - .await - } - - #[inline] - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::upsert_points` instead" - )] - async fn _upsert_points( - &self, - collection_name: impl ToString, - shard_key_selector: Option>, - points: &[PointStruct], - block: bool, - ordering: Option, - ) -> anyhow::Result { - let collection_name = collection_name.to_string(); - let collection_name_ref = collection_name.as_str(); - let ordering_ref = ordering.as_ref(); - let shard_keys = shard_key_selector.map(ShardKeySelector::from); - let shard_keys_ref = &shard_keys; - Ok(self - .with_points_client(|mut points_api| async move { - Ok(points_api - .upsert(UpsertPoints { - collection_name: collection_name_ref.to_string(), - wait: Some(block), - points: points.to_vec(), - ordering: ordering_ref.cloned(), - shard_key_selector: shard_keys_ref.clone(), - update_filter: None, - }) - .await? - .into_inner()) - }) - .await?) - } - - /// Insert or update points into the collection, splitting in chunks. - /// If points with given ID already exist, they will be overwritten. - /// This method does *not* wait for completion of the operation, use - /// [`upsert_points_batch_blocking`](Self::upsert_points_batch_blocking) for that. - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::upsert_points_batch` instead" - )] - pub async fn upsert_points_batch( - &self, - collection_name: impl ToString, - shard_key_selector: Option>, - points: Vec, - ordering: Option, - chunk_size: usize, - ) -> anyhow::Result { - self._upsert_points_batch( - collection_name, - shard_key_selector, - &points, - false, - ordering, - chunk_size, - ) - .await - } - - /// Insert or update points into the collection, splitting in chunks and - /// waiting for completion of each. - /// If points with given ID already exist, they will be overwritten. 
- #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::upsert_points_batch` instead" - )] - pub async fn upsert_points_batch_blocking( - &self, - collection_name: impl ToString, - shard_key_selector: Option>, - points: Vec, - ordering: Option, - chunk_size: usize, - ) -> anyhow::Result { - self._upsert_points_batch( - collection_name, - shard_key_selector, - &points, - true, - ordering, - chunk_size, - ) - .await - } - - #[inline] - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::upsert_points_batch` instead" - )] - async fn _upsert_points_batch( - &self, - collection_name: impl ToString, - shard_key_selector: Option>, - points: &[PointStruct], - block: bool, - ordering: Option, - chunk_size: usize, - ) -> anyhow::Result { - if points.len() < chunk_size { - return self - ._upsert_points(collection_name, shard_key_selector, points, block, ordering) - .await; - } - let collection_name = collection_name.to_string(); - let collection_name_ref = collection_name.as_str(); - let ordering_ref = ordering.as_ref(); - let shard_keys = shard_key_selector.map(ShardKeySelector::from); - let shard_keys_ref = &shard_keys; - Ok(self - .with_points_client(|mut points_api| async move { - let mut resp = PointsOperationResponse { - result: None, - time: 0.0, - usage: None, - }; - for chunk in points.chunks(chunk_size) { - let PointsOperationResponse { - result, - time, - usage, - } = points_api - .upsert(UpsertPoints { - collection_name: collection_name_ref.to_string(), - wait: Some(block), - points: chunk.to_vec(), - ordering: ordering_ref.cloned(), - shard_key_selector: shard_keys_ref.clone(), - update_filter: None, - }) - .await? - .into_inner(); - resp.result = result; - resp.time += time; - resp.usage = Usage::aggregate_opts(resp.usage, usage); - } - Ok(resp) - }) - .await?) 
- } - - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::set_payload` instead" - )] - pub async fn set_payload( - &self, - collection_name: impl ToString, - shard_key_selector: Option>, - points: &PointsSelector, - payload: Payload, - payload_key: Option, - ordering: Option, - ) -> anyhow::Result { - self._set_payload( - collection_name, - shard_key_selector, - points, - &payload, - payload_key, - false, - ordering, - ) - .await - } - - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::set_payload` instead" - )] - pub async fn set_payload_blocking( - &self, - collection_name: impl ToString, - shard_key_selector: Option>, - points: &PointsSelector, - payload: Payload, - payload_key: Option, - ordering: Option, - ) -> anyhow::Result { - self._set_payload( - collection_name, - shard_key_selector, - points, - &payload, - payload_key, - true, - ordering, - ) - .await - } - - #[inline] - #[allow(clippy::too_many_arguments)] - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::set_payload` instead" - )] - async fn _set_payload( - &self, - collection_name: impl ToString, - shard_key_selector: Option>, - points: &PointsSelector, - payload: &Payload, - payload_key: Option, - block: bool, - ordering: Option, - ) -> anyhow::Result { - let collection_name = collection_name.to_string(); - let collection_name_ref = collection_name.as_str(); - let ordering_ref = ordering.as_ref(); - let shard_keys = shard_key_selector.map(ShardKeySelector::from); - let shard_keys_ref = &shard_keys; - let payload_key_ref = payload_key.as_ref(); - - Ok(self - .with_points_client(|mut points_api| async move { - let result = points_api - .set_payload(SetPayloadPoints { - collection_name: collection_name_ref.to_string(), - wait: Some(block), - payload: payload.0.clone(), - points_selector: Some(points.clone()), - ordering: ordering_ref.cloned(), - shard_key_selector: shard_keys_ref.clone(), - key: payload_key_ref.cloned(), - }) - .await?; - Ok(result.into_inner()) - }) - .await?) 
- } - - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::overwrite_payload` instead" - )] - pub async fn overwrite_payload( - &self, - collection_name: impl ToString, - shard_key_selector: Option>, - points: &PointsSelector, - payload: Payload, - payload_key: Option, - ordering: Option, - ) -> anyhow::Result { - self._overwrite_payload( - collection_name, - shard_key_selector, - points, - &payload, - payload_key, - false, - ordering, - ) - .await - } - - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::overwrite_payload` instead" - )] - pub async fn overwrite_payload_blocking( - &self, - collection_name: impl ToString, - shard_key_selector: Option>, - points: &PointsSelector, - payload: Payload, - payload_key: Option, - ordering: Option, - ) -> anyhow::Result { - self._overwrite_payload( - collection_name, - shard_key_selector, - points, - &payload, - payload_key, - true, - ordering, - ) - .await - } - - #[inline] - #[allow(clippy::too_many_arguments)] - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::overwrite_payload` instead" - )] - async fn _overwrite_payload( - &self, - collection_name: impl ToString, - shard_key_selector: Option>, - points: &PointsSelector, - payload: &Payload, - payload_key: Option, - block: bool, - ordering: Option, - ) -> anyhow::Result { - let collection_name = collection_name.to_string(); - let collection_name_ref = collection_name.as_str(); - let ordering_ref = ordering.as_ref(); - let shard_keys = shard_key_selector.map(ShardKeySelector::from); - let shard_keys_ref = &shard_keys; - let payload_key_ref = payload_key.as_ref(); - - Ok(self - .with_points_client(|mut points_api| async move { - let result = points_api - .overwrite_payload(SetPayloadPoints { - collection_name: collection_name_ref.to_string(), - wait: Some(block), - payload: payload.0.clone(), - points_selector: Some(points.clone()), - ordering: ordering_ref.cloned(), - shard_key_selector: shard_keys_ref.clone(), - key: payload_key_ref.cloned(), - }) - .await?; - Ok(result.into_inner()) - }) - .await?) 
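On the new client, both the set and overwrite variants take a `SetPayloadPointsBuilder`, which also carries the `timeout` setter added earlier in this change. A sketch, assuming the usual `PointsIdsList` selector conversion and the `serde` feature:

use qdrant_client::{Payload, Qdrant};
use qdrant_client::qdrant::{PointsIdsList, SetPayloadPointsBuilder};
use serde_json::json;

async fn payloads(client: &Qdrant) -> anyhow::Result<()> {
    let payload: Payload = json!({ "color": "red" }).try_into().unwrap(); // needs the `serde` feature

    // Replaces the deprecated set_payload / set_payload_blocking pair;
    // wait(true) is the "blocking" variant.
    client
        .set_payload(
            SetPayloadPointsBuilder::new("my_collection", payload)
                .points_selector(PointsIdsList { ids: vec![0.into()] })
                .timeout(10) // new setter from earlier in this change
                .wait(true),
        )
        .await?;
    Ok(())
}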
- } - - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::delete_payload` instead" - )] - pub async fn delete_payload( - &self, - collection_name: impl ToString, - shard_key_selector: Option>, - points: &PointsSelector, - keys: Vec, - ordering: Option, - ) -> anyhow::Result { - self._delete_payload( - collection_name, - shard_key_selector, - points, - &keys, - false, - ordering, - ) - .await - } - - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::delete_payload` instead" - )] - pub async fn delete_payload_blocking( - &self, - collection_name: impl ToString, - shard_key_selector: Option>, - points: &PointsSelector, - keys: Vec, - ordering: Option, - ) -> anyhow::Result { - self._delete_payload( - collection_name, - shard_key_selector, - points, - &keys, - true, - ordering, - ) - .await - } - - #[inline] - #[deprecated( - since = "1.10.0", - note = "use new `qdrant_client::Qdrant::delete_payload` instead" - )] - async fn _delete_payload( - &self, - collection_name: impl ToString, - shard_key_selector: Option>, - points: &PointsSelector, - keys: &[String], - block: bool, - ordering: Option, - ) -> anyhow::Result { - let collection_name = collection_name.to_string(); - let collection_name_ref = collection_name.as_str(); - let ordering_ref = ordering.as_ref(); - let shard_keys = shard_key_selector.map(ShardKeySelector::from); - let shard_keys_ref = &shard_keys; - - Ok(self - .with_points_client(|mut points_api| async move { - let result = points_api - .delete_payload(DeletePayloadPoints { - collection_name: collection_name_ref.to_string(), - wait: Some(block), - keys: keys.to_owned(), - points_selector: Some(points.clone()), - ordering: ordering_ref.cloned(), - shard_key_selector: shard_keys_ref.clone(), - }) - .await?; - Ok(result.into_inner()) - }) - .await?) 
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::clear_payload` instead"
-    )]
-    pub async fn clear_payload(
-        &self,
-        collection_name: impl ToString,
-        shard_key_selector: Option<Vec<shard_key::Key>>,
-        points_selector: Option<PointsSelector>,
-        ordering: Option<WriteOrdering>,
-    ) -> anyhow::Result<PointsOperationResponse> {
-        self._clear_payload(
-            collection_name,
-            shard_key_selector,
-            points_selector.as_ref(),
-            false,
-            ordering,
-        )
-        .await
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::clear_payload` instead"
-    )]
-    pub async fn clear_payload_blocking(
-        &self,
-        collection_name: impl ToString,
-        shard_key_selector: Option<Vec<shard_key::Key>>,
-        points_selector: Option<PointsSelector>,
-        ordering: Option<WriteOrdering>,
-    ) -> anyhow::Result<PointsOperationResponse> {
-        self._clear_payload(
-            collection_name,
-            shard_key_selector,
-            points_selector.as_ref(),
-            true,
-            ordering,
-        )
-        .await
-    }
-
-    #[inline]
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::clear_payload` instead"
-    )]
-    async fn _clear_payload(
-        &self,
-        collection_name: impl ToString,
-        shard_key_selector: Option<Vec<shard_key::Key>>,
-        points_selector: Option<&PointsSelector>,
-        block: bool,
-        ordering: Option<WriteOrdering>,
-    ) -> anyhow::Result<PointsOperationResponse> {
-        let collection_name = collection_name.to_string();
-        let collection_name_ref = collection_name.as_str();
-        let ordering_ref = ordering.as_ref();
-        let shard_keys = shard_key_selector.map(ShardKeySelector::from);
-        let shard_keys_ref = &shard_keys;
-
-        Ok(self
-            .with_points_client(|mut points_api| async move {
-                let result = points_api
-                    .clear_payload(ClearPayloadPoints {
-                        collection_name: collection_name_ref.to_string(),
-                        wait: Some(block),
-                        points: points_selector.cloned(),
-                        ordering: ordering_ref.cloned(),
-                        shard_key_selector: shard_keys_ref.clone(),
-                    })
-                    .await?;
-                Ok(result.into_inner())
-            })
-            .await?)
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::get_points` instead"
-    )]
-    pub async fn get_points(
-        &self,
-        collection_name: impl ToString,
-        shard_key_selector: Option<Vec<shard_key::Key>>,
-        points: &[PointId],
-        with_vectors: Option<impl Into<WithVectorsSelector>>,
-        with_payload: Option<impl Into<WithPayloadSelector>>,
-        read_consistency: Option<ReadConsistency>,
-    ) -> anyhow::Result<GetResponse> {
-        let collection_name = collection_name.to_string();
-        let collection_name_ref = collection_name.as_str();
-
-        let with_vectors = with_vectors.map(|v| v.into());
-        let with_payload = with_payload.map(|v| v.into());
-
-        let with_vectors_ref = with_vectors.as_ref();
-        let with_payload_ref = with_payload.as_ref();
-        let read_consistency_ref = read_consistency.as_ref();
-
-        let shard_keys = shard_key_selector.map(ShardKeySelector::from);
-        let shard_keys_ref = &shard_keys;
-
-        Ok(self
-            .with_points_client(|mut points_api| async move {
-                let result = points_api
-                    .get(GetPoints {
-                        collection_name: collection_name_ref.to_string(),
-                        ids: points.to_owned(),
-                        with_payload: with_payload_ref.cloned(),
-                        with_vectors: with_vectors_ref.cloned(),
-                        read_consistency: read_consistency_ref.cloned(),
-                        shard_key_selector: shard_keys_ref.clone(),
-                        timeout: None,
-                    })
-                    .await?;
-
-                Ok(result.into_inner())
-            })
-            .await?)
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::search_points` instead"
-    )]
-    pub async fn search_points(&self, request: &SearchPoints) -> anyhow::Result<SearchResponse> {
-        Ok(self
-            .with_points_client(|mut points_api| async move {
-                let result = points_api.search(request.clone()).await?;
-                Ok(result.into_inner())
-            })
-            .await?)
-    }
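For the removed `clear_payload`/`get_points`/`search_points` group, the new client takes request builders directly; reads no longer have blocking variants. A search sketch mirroring the old example (collection `test` assumed):

```rust
use qdrant_client::qdrant::{Condition, Filter, SearchPointsBuilder};
use qdrant_client::{Qdrant, QdrantError};

async fn search_example(client: &Qdrant) -> Result<(), QdrantError> {
    let response = client
        .search_points(
            SearchPointsBuilder::new("test", vec![11.0; 10], 10)
                .filter(Filter::all([Condition::matches("bar", 12)]))
                .with_payload(true),
        )
        .await?;
    dbg!(response.result);
    Ok(())
}
```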
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::search_batch_points` instead"
-    )]
-    pub async fn search_batch_points(
-        &self,
-        request: &SearchBatchPoints,
-    ) -> anyhow::Result<SearchBatchResponse> {
-        Ok(self
-            .with_points_client(|mut points_api| async move {
-                let result = points_api.search_batch(request.clone()).await?;
-                Ok(result.into_inner())
-            })
-            .await?)
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::search_groups` instead"
-    )]
-    pub async fn search_groups(
-        &self,
-        request: &SearchPointGroups,
-    ) -> anyhow::Result<SearchGroupsResponse> {
-        Ok(self
-            .with_points_client(|mut points_api| async move {
-                let result = points_api.search_groups(request.clone()).await?;
-                Ok(result.into_inner())
-            })
-            .await?)
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::delete_points` instead"
-    )]
-    pub async fn delete_points(
-        &self,
-        collection_name: impl ToString,
-        shard_key_selector: Option<Vec<shard_key::Key>>,
-        points: &PointsSelector,
-        ordering: Option<WriteOrdering>,
-    ) -> anyhow::Result<PointsOperationResponse> {
-        self._delete_points(collection_name, shard_key_selector, false, points, ordering)
-            .await
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::delete_points` instead"
-    )]
-    pub async fn delete_points_blocking(
-        &self,
-        collection_name: impl ToString,
-        shard_key_selector: Option<Vec<shard_key::Key>>,
-        points: &PointsSelector,
-        ordering: Option<WriteOrdering>,
-    ) -> anyhow::Result<PointsOperationResponse> {
-        self._delete_points(collection_name, shard_key_selector, true, points, ordering)
-            .await
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::delete_points` instead"
-    )]
-    async fn _delete_points(
-        &self,
-        collection_name: impl ToString,
-        shard_key_selector: Option<Vec<shard_key::Key>>,
-        blocking: bool,
-        points: &PointsSelector,
-        ordering: Option<WriteOrdering>,
-    ) -> anyhow::Result<PointsOperationResponse> {
-        let collection_name = collection_name.to_string();
-        let collection_name_ref = collection_name.as_str();
-        let ordering_ref = ordering.as_ref();
-        let shard_keys = shard_key_selector.map(ShardKeySelector::from);
-        let shard_keys_ref = &shard_keys;
-
-        Ok(self
-            .with_points_client(|mut points_api| async move {
-                let result = points_api
-                    .delete(DeletePoints {
-                        collection_name: collection_name_ref.to_string(),
-                        wait: Some(blocking),
-                        points: Some(points.clone()),
-                        ordering: ordering_ref.cloned(),
-                        shard_key_selector: shard_keys_ref.clone(),
-                    })
-                    .await?;
-                Ok(result.into_inner())
-            })
-            .await?)
-    }
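`delete_points` and `delete_points_blocking` collapse into one call on the new client, with blocking expressed via `.wait(true)`; a sketch:

```rust
use qdrant_client::qdrant::{Condition, DeletePointsBuilder, Filter};
use qdrant_client::{Qdrant, QdrantError};

async fn delete_points_example(client: &Qdrant) -> Result<(), QdrantError> {
    client
        .delete_points(
            DeletePointsBuilder::new("test")
                .points(Filter::must([Condition::matches("bar", 12)]))
                .wait(true),
        )
        .await?;
    Ok(())
}
```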
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::delete_vectors` instead"
-    )]
-    pub async fn delete_vectors(
-        &self,
-        collection_name: impl ToString,
-        shard_key_selector: Option<Vec<shard_key::Key>>,
-        points_selector: &PointsSelector,
-        vector_selector: &VectorsSelector,
-        ordering: Option<WriteOrdering>,
-    ) -> anyhow::Result<PointsOperationResponse> {
-        self._delete_vectors(
-            collection_name,
-            shard_key_selector,
-            false,
-            points_selector,
-            vector_selector,
-            ordering,
-        )
-        .await
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::delete_vectors` instead"
-    )]
-    pub async fn delete_vectors_blocking(
-        &self,
-        collection_name: impl ToString,
-        shard_key_selector: Option<Vec<shard_key::Key>>,
-        points_selector: &PointsSelector,
-        vector_selector: &VectorsSelector,
-        ordering: Option<WriteOrdering>,
-    ) -> anyhow::Result<PointsOperationResponse> {
-        self._delete_vectors(
-            collection_name,
-            shard_key_selector,
-            true,
-            points_selector,
-            vector_selector,
-            ordering,
-        )
-        .await
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::delete_vectors` instead"
-    )]
-    async fn _delete_vectors(
-        &self,
-        collection_name: impl ToString,
-        shard_key_selector: Option<Vec<shard_key::Key>>,
-        blocking: bool,
-        points_selector: &PointsSelector,
-        vector_selector: &VectorsSelector,
-        ordering: Option<WriteOrdering>,
-    ) -> anyhow::Result<PointsOperationResponse> {
-        let collection_name = collection_name.to_string();
-        let collection_name_ref = collection_name.as_str();
-        let ordering_ref = ordering.as_ref();
-        let shard_keys = shard_key_selector.map(ShardKeySelector::from);
-        let shard_keys_ref = &shard_keys;
-
-        Ok(self
-            .with_points_client(|mut points_api| async move {
-                let result = points_api
-                    .delete_vectors(DeletePointVectors {
-                        collection_name: collection_name_ref.to_string(),
-                        wait: Some(blocking),
-                        points_selector: Some(points_selector.clone()),
-                        vectors: Some(vector_selector.clone()),
-                        ordering: ordering_ref.cloned(),
-                        shard_key_selector: shard_keys_ref.clone(),
-                    })
-                    .await?;
-                Ok(result.into_inner())
-            })
-            .await?)
-    }
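A sketch of the equivalent `delete_vectors` call on the new client (a named vector `text` is assumed here):

```rust
use qdrant_client::qdrant::{DeletePointVectorsBuilder, PointsIdsList, VectorsSelector};
use qdrant_client::{Qdrant, QdrantError};

async fn delete_vectors_example(client: &Qdrant) -> Result<(), QdrantError> {
    client
        .delete_vectors(
            DeletePointVectorsBuilder::new("test")
                .points_selector(PointsIdsList { ids: vec![0.into()] })
                .vectors(VectorsSelector { names: vec!["text".into()] })
                .wait(true),
        )
        .await?;
    Ok(())
}
```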
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::update_vectors` instead"
-    )]
-    pub async fn update_vectors(
-        &self,
-        collection_name: impl ToString,
-        shard_key_selector: Option<Vec<shard_key::Key>>,
-        points: &[PointVectors],
-        ordering: Option<WriteOrdering>,
-    ) -> anyhow::Result<PointsOperationResponse> {
-        self._update_vectors(collection_name, shard_key_selector, false, points, ordering)
-            .await
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::update_vectors` instead"
-    )]
-    pub async fn update_vectors_blocking(
-        &self,
-        collection_name: impl ToString,
-        shard_key_selector: Option<Vec<shard_key::Key>>,
-        points: &[PointVectors],
-        ordering: Option<WriteOrdering>,
-    ) -> anyhow::Result<PointsOperationResponse> {
-        self._update_vectors(collection_name, shard_key_selector, true, points, ordering)
-            .await
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::update_vectors` instead"
-    )]
-    async fn _update_vectors(
-        &self,
-        collection_name: impl ToString,
-        shard_key_selector: Option<Vec<shard_key::Key>>,
-        blocking: bool,
-        points: &[PointVectors],
-        ordering: Option<WriteOrdering>,
-    ) -> anyhow::Result<PointsOperationResponse> {
-        let collection_name = collection_name.to_string();
-        let collection_name_ref = collection_name.as_str();
-        let ordering_ref = ordering.as_ref();
-        let shard_keys = shard_key_selector.map(ShardKeySelector::from);
-        let shard_keys_ref = &shard_keys;
-
-        Ok(self
-            .with_points_client(|mut points_api| async move {
-                let result = points_api
-                    .update_vectors(UpdatePointVectors {
-                        collection_name: collection_name_ref.to_string(),
-                        wait: Some(blocking),
-                        points: points.to_owned(),
-                        ordering: ordering_ref.cloned(),
-                        shard_key_selector: shard_keys_ref.clone(),
-                        update_filter: None,
-                    })
-                    .await?;
-                Ok(result.into_inner())
-            })
-            .await?)
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::scroll` instead"
-    )]
-    pub async fn scroll(&self, request: &ScrollPoints) -> anyhow::Result<ScrollResponse> {
-        Ok(self
-            .with_points_client(|mut points_api| async move {
-                let result = points_api.scroll(request.clone()).await?;
-                Ok(result.into_inner())
-            })
-            .await?)
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::recommend` instead"
-    )]
-    pub async fn recommend(&self, request: &RecommendPoints) -> anyhow::Result<RecommendResponse> {
-        Ok(self
-            .with_points_client(|mut points_api| async move {
-                let result = points_api.recommend(request.clone()).await?;
-                Ok(result.into_inner())
-            })
-            .await?)
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::recommend_batch` instead"
-    )]
-    pub async fn recommend_batch(
-        &self,
-        request: &RecommendBatchPoints,
-    ) -> anyhow::Result<RecommendBatchResponse> {
-        Ok(self
-            .with_points_client(|mut points_api| async move {
-                let result = points_api.recommend_batch(request.clone()).await?;
-                Ok(result.into_inner())
-            })
-            .await?)
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::recommend_groups` instead"
-    )]
-    pub async fn recommend_groups(
-        &self,
-        request: &RecommendPointGroups,
-    ) -> anyhow::Result<RecommendGroupsResponse> {
-        Ok(self
-            .with_points_client(|mut points_api| async move {
-                let result = points_api.recommend_groups(request.clone()).await?;
-                Ok(result.into_inner())
-            })
-            .await?)
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::discover` instead"
-    )]
-    pub async fn discover(&self, request: &DiscoverPoints) -> anyhow::Result<DiscoverResponse> {
-        Ok(self
-            .with_points_client(|mut points_api| async move {
-                let result = points_api.discover(request.clone()).await?;
-                Ok(result.into_inner())
-            })
-            .await?)
-    }
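The removed read paths (`scroll`, `recommend*`, `discover*`) likewise take builders or request types directly on `Qdrant`; a `scroll` sketch:

```rust
use qdrant_client::qdrant::{Condition, Filter, ScrollPointsBuilder};
use qdrant_client::{Qdrant, QdrantError};

async fn scroll_example(client: &Qdrant) -> Result<(), QdrantError> {
    let response = client
        .scroll(
            ScrollPointsBuilder::new("test")
                .filter(Filter::must([Condition::matches("bar", 12)]))
                .limit(10)
                .with_payload(true)
                .with_vectors(false),
        )
        .await?;
    dbg!(response.result);
    Ok(())
}
```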
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::discover_batch` instead"
-    )]
-    pub async fn discover_batch(
-        &self,
-        request: &DiscoverBatchPoints,
-    ) -> anyhow::Result<DiscoverBatchResponse> {
-        Ok(self
-            .with_points_client(|mut points_api| async move {
-                let result = points_api.discover_batch(request.clone()).await?;
-                Ok(result.into_inner())
-            })
-            .await?)
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::count` instead"
-    )]
-    pub async fn count(&self, request: &CountPoints) -> anyhow::Result<CountResponse> {
-        Ok(self
-            .with_points_client(|mut points_api| async move {
-                let result = points_api.count(request.clone()).await?;
-                Ok(result.into_inner())
-            })
-            .await?)
-    }
-
-    /// Perform multiple point, vector and payload insert, update and delete operations in one request.
-    /// This method does *not* wait for completion of the operation, use
-    /// [`update_batch_points_blocking`](Self::update_batch_points_blocking) for that.
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::update_points_batch` instead"
-    )]
-    pub async fn update_batch_points(
-        &self,
-        collection_name: impl ToString,
-        operations: &[PointsUpdateOperation],
-        ordering: Option<WriteOrdering>,
-    ) -> anyhow::Result<UpdateBatchResponse> {
-        self._update_batch_points(collection_name, false, operations, ordering)
-            .await
-    }
-
-    /// Perform multiple point, vector and payload insert, update and delete operations in one request.
-    /// This method waits for completion on each operation.
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::update_points_batch` instead"
-    )]
-    pub async fn update_batch_points_blocking(
-        &self,
-        collection_name: impl ToString,
-        operations: &[PointsUpdateOperation],
-        ordering: Option<WriteOrdering>,
-    ) -> anyhow::Result<UpdateBatchResponse> {
-        self._update_batch_points(collection_name, true, operations, ordering)
-            .await
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::update_points_batch` instead"
-    )]
-    async fn _update_batch_points(
-        &self,
-        collection_name: impl ToString,
-        blocking: bool,
-        operations: &[PointsUpdateOperation],
-        ordering: Option<WriteOrdering>,
-    ) -> anyhow::Result<UpdateBatchResponse> {
-        let collection_name = collection_name.to_string();
-        let collection_name_ref = collection_name.as_str();
-        let ordering_ref = ordering.as_ref();
-
-        Ok(self
-            .with_points_client(|mut points_api| async move {
-                let result = points_api
-                    .update_batch(UpdateBatchPoints {
-                        collection_name: collection_name_ref.to_string(),
-                        wait: Some(blocking),
-                        operations: operations.to_owned(),
-                        ordering: ordering_ref.cloned(),
-                    })
-                    .await?;
-                Ok(result.into_inner())
-            })
-            .await?)
-    }
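A `count` sketch against the new client, replacing the removed wrapper:

```rust
use qdrant_client::qdrant::{Condition, CountPointsBuilder, Filter};
use qdrant_client::{Qdrant, QdrantError};

async fn count_example(client: &Qdrant) -> Result<(), QdrantError> {
    let response = client
        .count(
            CountPointsBuilder::new("test")
                .filter(Filter::must([Condition::matches("bar", 12)]))
                .exact(true),
        )
        .await?;
    dbg!(response.result);
    Ok(())
}
```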
-
-    /// Create index for a payload field
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::create_field_index` instead"
-    )]
-    pub async fn _create_field_index(
-        &self,
-        collection_name: impl ToString,
-        field_name: impl ToString,
-        field_type: FieldType,
-        field_index_params: Option<&PayloadIndexParams>,
-        wait: bool,
-        ordering: Option<WriteOrdering>,
-    ) -> anyhow::Result<PointsOperationResponse> {
-        let collection_name = collection_name.to_string();
-        let collection_name_ref = collection_name.as_str();
-        let field_name = field_name.to_string();
-        let field_name_ref = field_name.as_str();
-        let ordering_ref = ordering.as_ref();
-
-        Ok(self
-            .with_points_client(|mut client| async move {
-                let result = client
-                    .create_field_index(CreateFieldIndexCollection {
-                        collection_name: collection_name_ref.to_string(),
-                        wait: Some(wait),
-                        field_name: field_name_ref.to_string(),
-                        field_type: Some(field_type.into()),
-                        field_index_params: field_index_params.cloned(),
-                        ordering: ordering_ref.cloned(),
-                    })
-                    .await?;
-                Ok(result.into_inner())
-            })
-            .await?)
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::create_field_index` instead"
-    )]
-    pub async fn create_field_index(
-        &self,
-        collection_name: impl ToString,
-        field_name: impl ToString,
-        field_type: FieldType,
-        field_index_params: Option<&PayloadIndexParams>,
-        ordering: Option<WriteOrdering>,
-    ) -> anyhow::Result<PointsOperationResponse> {
-        self._create_field_index(
-            collection_name,
-            field_name,
-            field_type,
-            field_index_params,
-            false,
-            ordering,
-        )
-        .await
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::create_field_index` instead"
-    )]
-    pub async fn create_field_index_blocking(
-        &self,
-        collection_name: impl ToString,
-        field_name: impl ToString,
-        field_type: FieldType,
-        field_index_params: Option<&PayloadIndexParams>,
-        ordering: Option<WriteOrdering>,
-    ) -> anyhow::Result<PointsOperationResponse> {
-        self._create_field_index(
-            collection_name,
-            field_name,
-            field_type,
-            field_index_params,
-            true,
-            ordering,
-        )
-        .await
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::delete_field_index` instead"
-    )]
-    pub async fn _delete_field_index(
-        &self,
-        collection_name: impl ToString,
-        field_name: impl ToString,
-        wait: bool,
-        ordering: Option<WriteOrdering>,
-    ) -> anyhow::Result<PointsOperationResponse> {
-        let collection_name = collection_name.to_string();
-        let collection_name_ref = collection_name.as_str();
-        let field_name = field_name.to_string();
-        let field_name_ref = field_name.as_str();
-        let ordering_ref = ordering.as_ref();
-
-        Ok(self
-            .with_points_client(|mut client| async move {
-                let result = client
-                    .delete_field_index(DeleteFieldIndexCollection {
-                        collection_name: collection_name_ref.to_string(),
-                        wait: Some(wait),
-                        field_name: field_name_ref.to_string(),
-                        ordering: ordering_ref.cloned(),
-                    })
-                    .await?;
-                Ok(result.into_inner())
-            })
-            .await?)
-    }
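Field index management moves to builders as well. A sketch (the `DeleteFieldIndexCollectionBuilder` name follows the crate's builder convention and is an assumption here):

```rust
use qdrant_client::qdrant::{
    CreateFieldIndexCollectionBuilder, DeleteFieldIndexCollectionBuilder, FieldType,
};
use qdrant_client::{Qdrant, QdrantError};

async fn field_index_example(client: &Qdrant) -> Result<(), QdrantError> {
    client
        .create_field_index(
            CreateFieldIndexCollectionBuilder::new("test", "bar", FieldType::Integer).wait(true),
        )
        .await?;
    // Hypothetical builder name, mirroring the create side.
    client
        .delete_field_index(DeleteFieldIndexCollectionBuilder::new("test", "bar"))
        .await?;
    Ok(())
}
```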
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::delete_field_index` instead"
-    )]
-    pub async fn delete_field_index(
-        &self,
-        collection_name: impl ToString,
-        field_name: impl ToString,
-        ordering: Option<WriteOrdering>,
-    ) -> anyhow::Result<PointsOperationResponse> {
-        self._delete_field_index(collection_name, field_name, false, ordering)
-            .await
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::delete_field_index` instead"
-    )]
-    pub async fn delete_field_index_blocking(
-        &self,
-        collection_name: impl ToString,
-        field_name: impl ToString,
-        ordering: Option<WriteOrdering>,
-    ) -> anyhow::Result<PointsOperationResponse> {
-        self._delete_field_index(collection_name, field_name, true, ordering)
-            .await
-    }
-}
diff --git a/src/client/snapshot.rs b/src/client/snapshot.rs
deleted file mode 100644
index b595d208..00000000
--- a/src/client/snapshot.rs
+++ /dev/null
@@ -1,220 +0,0 @@
-use std::future::Future;
-#[cfg(feature = "download_snapshots")]
-use std::path::PathBuf;
-
-use tonic::codegen::InterceptedService;
-use tonic::transport::Channel;
-use tonic::Status;
-
-use crate::auth::TokenInterceptor;
-use crate::client::QdrantClient;
-use crate::qdrant::snapshots_client::SnapshotsClient;
-use crate::qdrant::{
-    CreateFullSnapshotRequest, CreateSnapshotRequest, CreateSnapshotResponse,
-    DeleteFullSnapshotRequest, DeleteSnapshotRequest, DeleteSnapshotResponse,
-    ListFullSnapshotsRequest, ListSnapshotsRequest, ListSnapshotsResponse,
-};
-
-impl QdrantClient {
-    pub async fn with_snapshot_client<T, O: Future<Output = anyhow::Result<T, Status>>>(
-        &self,
-        f: impl Fn(SnapshotsClient<InterceptedService<Channel, TokenInterceptor>>) -> O,
-    ) -> anyhow::Result<T> {
-        self.channel
-            .with_channel(
-                |channel| {
-                    let service = self.with_api_key(channel);
-                    let mut client =
-                        SnapshotsClient::new(service).max_decoding_message_size(usize::MAX);
-                    if let Some(compression) = self.cfg.compression {
-                        client = client
-                            .send_compressed(compression.into())
-                            .accept_compressed(compression.into());
-                    }
-                    f(client)
-                },
-                false,
-            )
-            .await
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::create_snapshot` instead"
-    )]
-    pub async fn create_snapshot(
-        &self,
-        collection_name: impl ToString,
-    ) -> anyhow::Result<CreateSnapshotResponse> {
-        let collection_name = collection_name.to_string();
-        let collection_name_ref = collection_name.as_str();
-        Ok(self
-            .with_snapshot_client(|mut client| async move {
-                let result = client
-                    .create(CreateSnapshotRequest {
-                        collection_name: collection_name_ref.to_string(),
-                    })
-                    .await?;
-
-                Ok(result.into_inner())
-            })
-            .await?)
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::list_snapshot` instead"
-    )]
-    pub async fn list_snapshots(
-        &self,
-        collection_name: impl ToString,
-    ) -> anyhow::Result<ListSnapshotsResponse> {
-        let collection_name = collection_name.to_string();
-        let collection_name_ref = collection_name.as_str();
-        Ok(self
-            .with_snapshot_client(|mut client| async move {
-                let result = client
-                    .list(ListSnapshotsRequest {
-                        collection_name: collection_name_ref.to_string(),
-                    })
-                    .await?;
-                Ok(result.into_inner())
-            })
-            .await?)
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::delete_snapshot` instead"
-    )]
-    pub async fn delete_snapshot(
-        &self,
-        collection_name: impl ToString,
-        snapshot_name: impl ToString,
-    ) -> anyhow::Result<DeleteSnapshotResponse> {
-        let collection_name = collection_name.to_string();
-        let collection_name_ref = collection_name.as_str();
-        let snapshot_name = snapshot_name.to_string();
-        let snapshot_name_ref = snapshot_name.as_str();
-        Ok(self
-            .with_snapshot_client(|mut client| async move {
-                let result = client
-                    .delete(DeleteSnapshotRequest {
-                        collection_name: collection_name_ref.to_string(),
-                        snapshot_name: snapshot_name_ref.to_string(),
-                    })
-                    .await?;
-                Ok(result.into_inner())
-            })
-            .await?)
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::create_full_snapshot` instead"
-    )]
-    pub async fn create_full_snapshot(&self) -> anyhow::Result<CreateSnapshotResponse> {
-        Ok(self
-            .with_snapshot_client(|mut client| async move {
-                let result = client.create_full(CreateFullSnapshotRequest {}).await?;
-
-                Ok(result.into_inner())
-            })
-            .await?)
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::list_full_snapshots` instead"
-    )]
-    pub async fn list_full_snapshots(&self) -> anyhow::Result<ListSnapshotsResponse> {
-        Ok(self
-            .with_snapshot_client(|mut client| async move {
-                let result = client.list_full(ListFullSnapshotsRequest {}).await?;
-                Ok(result.into_inner())
-            })
-            .await?)
-    }
-
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::delete_full_snapshot` instead"
-    )]
-    pub async fn delete_full_snapshot(
-        &self,
-        snapshot_name: impl ToString,
-    ) -> anyhow::Result<DeleteSnapshotResponse> {
-        let snapshot_name = snapshot_name.to_string();
-        let snapshot_name_ref = snapshot_name.as_str();
-        Ok(self
-            .with_snapshot_client(|mut client| async move {
-                let result = client
-                    .delete_full(DeleteFullSnapshotRequest {
-                        snapshot_name: snapshot_name_ref.to_string(),
-                    })
-                    .await?;
-                Ok(result.into_inner())
-            })
-            .await?)
-    }
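The snapshot wrappers above have one-argument replacements on the new `Qdrant` client, thanks to the `From<impl Into<String>>` conversions on the request types; a sketch:

```rust
use qdrant_client::{Qdrant, QdrantError};

async fn snapshot_example(client: &Qdrant) -> Result<(), QdrantError> {
    client.create_snapshot("test").await?;
    let snapshots = client.list_snapshots("test").await?;
    dbg!(snapshots.snapshot_descriptions);
    Ok(())
}
```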
-
-    #[cfg(feature = "download_snapshots")]
-    #[deprecated(
-        since = "1.10.0",
-        note = "use new `qdrant_client::Qdrant::download_snapshot` instead"
-    )]
-    pub async fn download_snapshot<T>(
-        &self,
-        out_path: impl Into<PathBuf>,
-        collection_name: T,
-        snapshot_name: Option<T>,
-        rest_api_uri: Option<T>,
-    ) -> anyhow::Result<()>
-    where
-        T: ToString + Clone,
-    {
-        use std::io::Write;
-
-        use futures_util::StreamExt;
-
-        let snapshot_name = match snapshot_name {
-            Some(sn) => sn.to_string(),
-            _ => match self
-                .list_snapshots(collection_name.clone())
-                .await?
-                .snapshot_descriptions
-                .first()
-            {
-                Some(sn) => sn.name.clone(),
-                _ => anyhow::bail!(
-                    "No snapshots found for collection {}",
-                    collection_name.to_string()
-                ),
-            },
-        };
-
-        let mut stream = reqwest::get(format!(
-            "{}/collections/{}/snapshots/{}",
-            rest_api_uri
-                .map(|uri| uri.to_string())
-                .unwrap_or_else(|| String::from("http://localhost:6333")),
-            collection_name.to_string(),
-            snapshot_name
-        ))
-        .await?
-        .bytes_stream();
-
-        let out_path = out_path.into();
-        let _ = std::fs::remove_file(&out_path);
-        let mut file = std::fs::OpenOptions::new()
-            .write(true)
-            .create_new(true)
-            .open(out_path)?;
-
-        while let Some(chunk) = stream.next().await {
-            let _written = file.write(&chunk?)?;
-        }
-
-        Ok(())
-    }
-}
diff --git a/src/error.rs b/src/error.rs
deleted file mode 100644
index 962f2b31..00000000
--- a/src/error.rs
+++ /dev/null
@@ -1,158 +0,0 @@
-#![allow(deprecated)]
-
-use std::error::Error;
-use std::fmt::{Debug, Display, Formatter};
-use std::marker::PhantomData;
-
-use crate::qdrant::value::Kind::*;
-use crate::qdrant::{ListValue, Struct, Value};
-
-/// An error for failed conversions (e.g. calling [`String::try_from(v)`](String::try_from) on an
-/// integer [`Value`])
-#[deprecated(since = "1.10.0", note = "new functions don't use this type anymore")]
-pub struct NotA<T> {
-    marker: PhantomData<T>,
-}
-
-impl<T> Default for NotA<T> {
-    fn default() -> Self {
-        NotA {
-            marker: PhantomData,
-        }
-    }
-}
-impl Error for NotA<Struct> {}
-
-impl Debug for NotA<Struct> {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{self}")
-    }
-}
-
-impl Display for NotA<Struct> {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        f.write_str("not a Struct")
-    }
-}
-
-impl Error for NotA<ListValue> {}
-
-impl Debug for NotA<ListValue> {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{self}")
-    }
-}
-
-impl Display for NotA<ListValue> {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        f.write_str("not a ListValue")
-    }
-}
-
-// Error + Conversion impl for bool
-impl Error for NotA<bool> {}
-
-impl Debug for NotA<bool> {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{self}")
-    }
-}
-
-impl Display for NotA<bool> {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        f.write_str("not a bool")
-    }
-}
-
-impl TryFrom<Value> for bool {
-    type Error = NotA<bool>;
-
-    fn try_from(v: Value) -> Result<Self, NotA<bool>> {
-        if let Some(BoolValue(t)) = v.kind {
-            Ok(t)
-        } else {
-            Err(NotA::default())
-        }
-    }
-}
-
-// Error + Conversion impl for i64
-impl Error for NotA<i64> {}
-
-impl Debug for NotA<i64> {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{self}")
-    }
-}
-
-impl Display for NotA<i64> {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        f.write_str("not an i64")
-    }
-}
-
-impl TryFrom<Value> for i64 {
-    type Error = NotA<i64>;
-
-    fn try_from(v: Value) -> Result<Self, NotA<i64>> {
-        if let Some(IntegerValue(t)) = v.kind {
-            Ok(t)
-        } else {
-            Err(NotA::default())
-        }
-    }
-}
-
-// Error + Conversion impl for f64
-impl Error for NotA<f64> {}
-
-impl Debug for NotA<f64> {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{self}")
-    }
-}
-
-impl Display for NotA<f64> {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        f.write_str("not a f64")
-    }
-}
-
-impl TryFrom<Value> for f64 {
-    type Error = NotA<f64>;
-
-    fn try_from(v: Value) -> Result<Self, NotA<f64>> {
-        if let Some(DoubleValue(t)) = v.kind {
-            Ok(t)
-        } else {
-            Err(NotA::default())
-        }
-    }
-}
-
-// Error + Conversion impl for string
-impl Error for NotA<String> {}
-
-impl Debug for NotA<String> {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{self}")
-    }
-}
-
-impl Display for NotA<String> {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        f.write_str("not a String")
-    }
-}
-
-impl TryFrom<Value> for String {
-    type Error = NotA<String>;
-
-    fn try_from(v: Value) -> Result<Self, NotA<String>> {
-        if let Some(StringValue(t)) = v.kind {
-            Ok(t)
-        } else {
-            Err(NotA::default())
-        }
-    }
-}
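With `NotA` and its `TryFrom<Value>` impls gone, plain pattern matching on `value::Kind` covers the same conversions; a sketch for the integer case:

```rust
use qdrant_client::qdrant::value::Kind;
use qdrant_client::qdrant::Value;

/// Extract an integer payload value, where `TryFrom<Value> for i64` was used before.
fn as_i64(value: &Value) -> Option<i64> {
    match &value.kind {
        Some(Kind::IntegerValue(i)) => Some(*i),
        _ => None,
    }
}
```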
diff --git a/src/grpc_conversions/extensions.rs b/src/grpc_conversions/extensions.rs
index 6e34b7ed..a7abdfc2 100644
--- a/src/grpc_conversions/extensions.rs
+++ b/src/grpc_conversions/extensions.rs
@@ -4,16 +4,13 @@ use std::hash::{Hash, Hasher};
 #[cfg(feature = "uuid")]
 use uuid::Uuid;
 
-use crate::client::Payload;
-#[allow(deprecated)]
-use crate::error::NotA;
-use crate::prelude::{PointStruct, Value};
+use crate::payload::Payload;
 #[cfg(feature = "uuid")]
 use crate::qdrant::point_id::PointIdOptions;
 use crate::qdrant::value::Kind;
 use crate::qdrant::{
-    HardwareUsage, InferenceUsage, ListValue, ModelUsage, PointId, RetrievedPoint, ScoredPoint,
-    Struct, Usage, Vectors,
+    HardwareUsage, InferenceUsage, ListValue, ModelUsage, PointId, PointStruct, RetrievedPoint,
+    ScoredPoint, Struct, Usage, Value, Vectors,
 };
 
 /// Null value
@@ -161,8 +158,7 @@ impl Value {
     ///
     /// ```
     /// use serde_json::json;
-    /// use qdrant_client::prelude::*;
-    /// use qdrant_client::qdrant::{value::Kind::*, Struct};
+    /// use qdrant_client::qdrant::{value::Kind::*, Struct, Value};
     /// let value = Value { kind: Some(StructValue(Struct {
     ///     fields: [
     ///         ("text".into(), Value { kind: Some(StringValue("Hi Qdrant!".into())) }),
@@ -245,17 +241,6 @@ impl Value {
         }
     }
 
-    /// Try to get an iterator over the items of the contained list value, if any
-    #[deprecated(since = "1.10.0", note = "use `try_list_iter` instead")]
-    #[allow(deprecated)]
-    pub fn iter_list(&self) -> Result<impl Iterator<Item = &Value>, NotA<ListValue>> {
-        if let Some(Kind::ListValue(values)) = &self.kind {
-            Ok(values.iter())
-        } else {
-            Err(NotA::default())
-        }
-    }
-
     /// Get a value from a struct field
     ///
     /// Returns `None` if this is not a struct type or if the field is not present.
@@ -266,17 +251,6 @@ impl Value {
             None
         }
     }
-
-    /// Try to get a field from the struct if this value contains one
-    #[deprecated(since = "1.10.0", note = "use `get_value` instead")]
-    #[allow(deprecated)]
-    pub fn get_struct(&self, key: &str) -> Result<&Value, NotA<Struct>> {
-        if let Some(Kind::StructValue(Struct { fields })) = &self.kind {
-            Ok(fields.get(key).unwrap_or(&NULL_VALUE))
-        } else {
-            Err(NotA::default())
-        }
-    }
 }
 
 impl std::ops::Deref for ListValue {
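The surviving replacements named in the deprecation notes, `get_value` and `try_list_iter`, compose with `Option` instead of `NotA` errors. A sketch (assuming `try_list_iter` returns an `Option` of an iterator, per the note above):

```rust
use qdrant_client::qdrant::Value;

/// First element of a `tags` list field, replacing `get_struct` + `iter_list`.
fn first_tag(payload_value: &Value) -> Option<&Value> {
    payload_value
        .get_value("tags")
        .and_then(|tags| tags.try_list_iter())
        .and_then(|mut it| it.next())
}
```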
diff --git a/src/grpc_conversions/mod.rs b/src/grpc_conversions/mod.rs
index c4c1075d..43ba09cf 100644
--- a/src/grpc_conversions/mod.rs
+++ b/src/grpc_conversions/mod.rs
@@ -3,7 +3,7 @@ pub mod metadata;
 mod primitives;
 pub mod vectors;
 
-use crate::client::Payload;
+use crate::payload::Payload;
 use crate::qdrant::point_id::PointIdOptions;
 use crate::qdrant::points_selector::PointsSelectorOneOf;
 use crate::qdrant::value::Kind;
diff --git a/src/grpc_conversions/primitives.rs b/src/grpc_conversions/primitives.rs
index a4cdd20d..4df69492 100644
--- a/src/grpc_conversions/primitives.rs
+++ b/src/grpc_conversions/primitives.rs
@@ -1,17 +1,16 @@
 use std::collections::HashMap;
 
-use crate::prelude::point_id::PointIdOptions;
-use crate::prelude::{DeleteCollection, Value};
+use crate::qdrant::point_id::PointIdOptions;
 use crate::qdrant::value::Kind;
 use crate::qdrant::{
     shard_key, with_payload_selector, with_vectors_selector, CollectionClusterInfoRequest,
-    CollectionExistsRequest, CreateSnapshotRequest, DeleteAlias, DeleteCollectionBuilder,
-    DeleteFullSnapshotRequest, GetCollectionInfoRequest, IsEmptyCondition, IsNullCondition,
-    ListCollectionAliasesRequest, ListSnapshotsRequest, PayloadExcludeSelector,
-    PayloadIncludeSelector, PointId, RepeatedIntegers, RepeatedStrings, ShardKey, ShardKeySelector,
-    SparseIndices, SparseVectorConfig, SparseVectorParams, Struct, VectorParams, VectorParamsDiff,
-    VectorParamsDiffMap, VectorParamsMap, VectorsSelector, WithPayloadSelector,
-    WithVectorsSelector,
+    CollectionExistsRequest, CreateSnapshotRequest, DeleteAlias, DeleteCollection,
+    DeleteCollectionBuilder, DeleteFullSnapshotRequest, GetCollectionInfoRequest, IsEmptyCondition,
+    IsNullCondition, ListCollectionAliasesRequest, ListShardKeysRequest, ListSnapshotsRequest,
+    PayloadExcludeSelector, PayloadIncludeSelector, PointId, RepeatedIntegers, RepeatedStrings,
+    ShardKey, ShardKeySelector, SparseIndices, SparseVectorConfig, SparseVectorParams, Struct,
+    Value, VectorParams, VectorParamsDiff, VectorParamsDiffMap, VectorParamsMap, VectorsSelector,
+    WithPayloadSelector, WithVectorsSelector,
 };
 
 impl From<bool> for WithPayloadSelector {
@@ -318,6 +317,14 @@ impl<S: Into<String>> From<S> for GetCollectionInfoRequest {
     }
 }
 
+impl<S: Into<String>> From<S> for ListShardKeysRequest {
+    fn from(value: S) -> Self {
+        Self {
+            collection_name: value.into(),
+        }
+    }
+}
+
 impl<S: Into<String>> From<S> for DeleteAlias {
     fn from(value: S) -> Self {
         Self {
diff --git a/src/lib.rs b/src/lib.rs
index de244e30..b0cc2de7 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -144,30 +144,8 @@ mod grpc_macros;
 mod manual_builder;
 mod payload;
 mod qdrant_client;
-// Deprecated modules
-/// Deprecated Qdrant client
-#[deprecated(
-    since = "1.10.0",
-    note = "use new client at `qdrant_client::Qdrant` instead"
-)]
-#[doc(hidden)]
-pub mod client;
-/// Deprecated error type
-#[deprecated(
-    since = "1.10.0",
-    note = "use new error type at `qdrant_client::Error` instead"
-)]
-#[doc(hidden)]
-pub mod error;
-/// Deprecated prelude
-#[deprecated(since = "1.10.0", note = "use types directly")]
-#[doc(hidden)]
-pub mod prelude;
-/// Deprecated serde helper
 #[cfg(feature = "serde")]
-#[deprecated(since = "1.10.0", note = "use `Payload::try_from` instead")]
-#[doc(hidden)]
-pub mod serde;
+mod serde_impl;
 
 #[cfg(feature = "serde")]
 pub mod serde_deser;
@@ -371,6 +349,7 @@ mod tests {
                     field_type: Some(FieldType::Keyword as i32),
                     field_index_params: None,
                     ordering: None,
+                    timeout: None,
                 })
                 .await
             })
diff --git a/src/manual_builder.rs b/src/manual_builder.rs
index 9fe1d3d9..d2600b7c 100644
--- a/src/manual_builder.rs
+++ b/src/manual_builder.rs
@@ -53,6 +53,7 @@ pub mod points_update_operation {
                 points: builder.points,
                 shard_key_selector: builder.shard_key_selector,
                 update_filter: None,
+                update_mode: None,
             })
         }
     }
diff --git a/src/payload.rs b/src/payload.rs
index 75bdb088..c510e353 100644
--- a/src/payload.rs
+++ b/src/payload.rs
@@ -68,12 +68,6 @@ impl Payload {
         Self(HashMap::with_capacity(capacity))
     }
 
-    /// Construct a payload object from the given hash map
-    #[deprecated(since = "1.10.0", note = "use `Payload::from` instead")]
-    pub fn new_from_hashmap(payload: HashMap<String, Value>) -> Self {
-        Self(payload)
-    }
-
     /// Insert a payload value at the given key, replacing any existing value
     pub fn insert(&mut self, key: impl ToString, val: impl Into<Value>) {
         self.0.insert(key.to_string(), val.into());
@@ -244,8 +238,7 @@ impl From<Payload> for Value {
 mod tests {
     use serde_json::json;
 
-    use super::*;
-    use crate::client::Payload;
+    use super::{Payload, *};
 
     #[test]
     fn json_payload_round_trip() {
diff --git a/src/prelude.rs b/src/prelude.rs
deleted file mode 100644
index 912408ba..00000000
--- a/src/prelude.rs
+++ /dev/null
@@ -1,6 +0,0 @@
-#![allow(deprecated)]
-
-pub use crate::client::*;
-pub use crate::qdrant::{
-    point_id, CreateCollection, DeleteCollection, Distance, PointStruct, SearchPoints, Value,
-};
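Following the `payload.rs` change above, a map-based payload is now built with `Payload::from` instead of the removed `new_from_hashmap`; a sketch:

```rust
use std::collections::HashMap;

use qdrant_client::qdrant::Value;
use qdrant_client::Payload;

fn payload_from_map() -> Payload {
    let mut map: HashMap<String, Value> = HashMap::new();
    map.insert("bar".to_string(), 12.into());
    // `Payload::from` replaces the removed `new_from_hashmap`.
    Payload::from(map)
}
```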
diff --git a/src/qdrant.rs b/src/qdrant.rs
index 99c8f3ff..5bc86f41 100644
--- a/src/qdrant.rs
+++ b/src/qdrant.rs
@@ -319,7 +319,8 @@ pub struct GeoLineString {
     #[prost(message, repeated, tag = "1")]
     pub points: ::prost::alloc::vec::Vec<GeoPoint>,
 }
-/// For a valid GeoPolygon, both the exterior and interior GeoLineStrings must consist of a minimum of 4 points.
+/// For a valid GeoPolygon, both the exterior and interior GeoLineStrings must
+/// consist of a minimum of 4 points.
 /// Additionally, the first and last points of each GeoLineString must be the same.
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct GeoPolygon {
@@ -349,13 +350,16 @@ pub struct VectorParams {
     /// Distance function used for comparing vectors
     #[prost(enumeration = "Distance", tag = "2")]
     pub distance: i32,
-    /// Configuration of vector HNSW graph. If omitted - the collection configuration will be used
+    /// Configuration of vector HNSW graph.
+    /// If omitted - the collection configuration will be used
     #[prost(message, optional, tag = "3")]
     pub hnsw_config: ::core::option::Option<HnswConfigDiff>,
-    /// Configuration of vector quantization config. If omitted - the collection configuration will be used
+    /// Configuration of vector quantization config.
+    /// If omitted - the collection configuration will be used
     #[prost(message, optional, tag = "4")]
     pub quantization_config: ::core::option::Option<QuantizationConfig>,
-    /// If true - serve vectors from disk. If set to false, the vectors will be loaded in RAM.
+    /// If true - serve vectors from disk.
+    /// If set to false, the vectors will be loaded in RAM.
     #[prost(bool, optional, tag = "5")]
     pub on_disk: ::core::option::Option<bool>,
     /// Data type of the vectors
@@ -367,13 +371,15 @@ pub struct VectorParams {
 }
 #[derive(Clone, Copy, PartialEq, ::prost::Message)]
 pub struct VectorParamsDiff {
-    /// Update params for HNSW index. If empty object - it will be unset
+    /// Update params for HNSW index.
+    /// If empty object - it will be unset
     #[prost(message, optional, tag = "1")]
     pub hnsw_config: ::core::option::Option<HnswConfigDiff>,
     /// Update quantization params. If none - it is left unchanged.
     #[prost(message, optional, tag = "2")]
     pub quantization_config: ::core::option::Option<QuantizationConfigDiff>,
-    /// If true - serve vectors from disk. If set to false, the vectors will be loaded in RAM.
+    /// If true - serve vectors from disk.
+    /// If set to false, the vectors will be loaded in RAM.
     #[prost(bool, optional, tag = "3")]
     pub on_disk: ::core::option::Option<bool>,
 }
@@ -553,15 +559,14 @@ pub struct CollectionWarning {
 }
 #[derive(Clone, Copy, PartialEq, ::prost::Message)]
 pub struct HnswConfigDiff {
-    ///
-    /// Number of edges per node in the index graph. Larger the value - more accurate the search, more space required.
+    /// Number of edges per node in the index graph.
+    /// Larger the value - more accurate the search, more space required.
     #[prost(uint64, optional, tag = "1")]
     pub m: ::core::option::Option<u64>,
-    ///
-    /// Number of neighbours to consider during the index building. Larger the value - more accurate the search, more time required to build the index.
+    /// Number of neighbours to consider during the index building.
+    /// Larger the value - more accurate the search, more time required to build the index.
     #[prost(uint64, optional, tag = "2")]
     pub ef_construct: ::core::option::Option<u64>,
-    ///
     /// Minimal size threshold (in KiloBytes) below which full-scan is preferred over HNSW search.
     /// This measures the total size of vectors being queried against.
     /// When the maximum estimated amount of points that a condition satisfies is smaller than
@@ -570,22 +575,19 @@ pub struct HnswConfigDiff {
     /// Note: 1Kb = 1 vector of size 256
     #[prost(uint64, optional, tag = "3")]
     pub full_scan_threshold: ::core::option::Option<u64>,
-    ///
     /// Number of parallel threads used for background index building.
     /// If 0 - automatically select from 8 to 16.
     /// Best to keep between 8 and 16 to prevent likelihood of building broken/inefficient HNSW graphs.
     /// On small CPUs, less threads are used.
     #[prost(uint64, optional, tag = "4")]
     pub max_indexing_threads: ::core::option::Option<u64>,
-    ///
     /// Store HNSW index on disk. If set to false, the index will be stored in RAM.
     #[prost(bool, optional, tag = "5")]
     pub on_disk: ::core::option::Option<bool>,
-    ///
-    /// Number of additional payload-aware links per node in the index graph. If not set - regular M parameter will be used.
+    /// Number of additional payload-aware links per node in the index graph.
+    /// If not set - regular M parameter will be used.
     #[prost(uint64, optional, tag = "6")]
     pub payload_m: ::core::option::Option<u64>,
-    ///
     /// Store copies of original and quantized vectors within the HNSW index file. Default: false.
     /// Enabling this option will trade the search speed for disk usage by reducing amount of
     /// random seeks during the search.
@@ -595,16 +597,13 @@ pub struct SparseIndexConfig {
-    ///
     /// Prefer a full scan search upto (excluding) this number of vectors.
     /// Note: this is number of vectors, not KiloBytes.
     #[prost(uint64, optional, tag = "1")]
     pub full_scan_threshold: ::core::option::Option<u64>,
-    ///
     /// Store inverted index on disk. If set to false, the index will be stored in RAM.
     #[prost(bool, optional, tag = "2")]
     pub on_disk: ::core::option::Option<bool>,
-    ///
     /// Datatype used to store weights in the index.
     #[prost(enumeration = "Datatype", optional, tag = "3")]
    pub datatype: ::core::option::Option<i32>,
@@ -623,15 +622,14 @@ pub struct WalConfigDiff {
 }
 #[derive(Clone, Copy, PartialEq, ::prost::Message)]
 pub struct OptimizersConfigDiff {
-    ///
-    /// The minimal fraction of deleted vectors in a segment, required to perform segment optimization
+    /// The minimal fraction of deleted vectors in a segment, required to perform
+    /// segment optimization
     #[prost(double, optional, tag = "1")]
     pub deleted_threshold: ::core::option::Option<f64>,
-    ///
-    /// The minimal number of vectors in a segment, required to perform segment optimization
+    /// The minimal number of vectors in a segment, required to perform segment
+    /// optimization
     #[prost(uint64, optional, tag = "2")]
     pub vacuum_min_vector_number: ::core::option::Option<u64>,
-    ///
     /// Target amount of segments the optimizer will try to keep.
     /// Real amount of segments may vary depending on multiple parameters:
     ///
     /// - Amount of stored points.
     /// - Current write RPS.
     ///
     /// It is recommended to select default number of segments as a factor of the number of search threads,
     /// so that each segment would be handled evenly by one of the threads.
     #[prost(uint64, optional, tag = "3")]
     pub default_segment_number: ::core::option::Option<u64>,
-    ///
     /// Deprecated:
     ///
     /// Do not create segments larger this size (in kilobytes).
@@ -655,7 +652,6 @@ pub struct OptimizersConfigDiff {
     /// If not set, will be automatically selected considering the number of available CPUs.
     #[prost(uint64, optional, tag = "4")]
     pub max_segment_size: ::core::option::Option<u64>,
-    ///
     /// Maximum size (in kilobytes) of vectors to store in-memory per segment.
     /// Segments larger than this threshold will be stored as read-only memmapped file.
     ///
@@ -666,30 +662,36 @@ pub struct OptimizersConfigDiff {
     /// Note: 1Kb = 1 vector of size 256
     #[prost(uint64, optional, tag = "5")]
     pub memmap_threshold: ::core::option::Option<u64>,
+    /// Maximum size (in kilobytes) of vectors allowed for plain index, exceeding
+    /// this threshold will enable vector indexing
     ///
-    /// Maximum size (in kilobytes) of vectors allowed for plain index, exceeding this threshold will enable vector indexing
-    ///
-    /// Default value is 20,000, based on <https://github.com/google-research/google-research/blob/master/scann/docs/algorithms.md>.
+    /// Default value is 20,000, based on
+    /// <https://github.com/google-research/google-research/blob/master/scann/docs/algorithms.md>.
     ///
     /// To disable vector indexing, set to `0`.
     ///
     /// Note: 1kB = 1 vector of size 256.
     #[prost(uint64, optional, tag = "6")]
     pub indexing_threshold: ::core::option::Option<u64>,
-    ///
     /// Interval between forced flushes.
     #[prost(uint64, optional, tag = "7")]
     pub flush_interval_sec: ::core::option::Option<u64>,
     /// Deprecated in favor of `max_optimization_threads`
     #[prost(uint64, optional, tag = "8")]
     pub deprecated_max_optimization_threads: ::core::option::Option<u64>,
-    ///
     /// Max number of threads (jobs) for running optimizations per shard.
     /// Note: each optimization job will also use `max_indexing_threads` threads by itself for index building.
     /// If "auto" - have no limit and choose dynamically to saturate CPU.
     /// If 0 - no optimization threads, optimizations will be disabled.
     #[prost(message, optional, tag = "9")]
     pub max_optimization_threads: ::core::option::Option<MaxOptimizationThreads>,
+    /// If this option is set, service will try to prevent creation of large unoptimized segments.
+    /// When enabled, updates may be blocked at request level if there are unoptimized segments larger than indexing threshold.
+    /// Updates will be resumed when optimization is completed and segments are optimized below the threshold.
+    /// Using this option may lead to increased delay between submitting an update and its application.
+    /// Default is disabled.
+    #[prost(bool, optional, tag = "10")]
+    pub prevent_unoptimized: ::core::option::Option<bool>,
 }
 #[derive(Clone, Copy, PartialEq, ::prost::Message)]
 pub struct ScalarQuantization {
@@ -775,8 +777,8 @@ pub struct BinaryQuantization {
     /// Binary quantization encoding method
     #[prost(enumeration = "BinaryQuantizationEncoding", optional, tag = "2")]
     pub encoding: ::core::option::Option<i32>,
-    ///
-    /// Asymmetric quantization configuration allows a query to have different quantization than stored vectors.
+    /// Asymmetric quantization configuration allows a query to have different
+    /// quantization than stored vectors.
     /// It can increase the accuracy of search at the cost of performance.
     #[prost(message, optional, tag = "3")]
     pub query_encoding: ::core::option::Option<BinaryQuantizationQueryEncoding>,
@@ -921,13 +923,15 @@ pub struct CreateCollection {
     /// Configuration of the optimizers
     #[prost(message, optional, tag = "6")]
     pub optimizers_config: ::core::option::Option<OptimizersConfigDiff>,
-    /// Number of shards in the collection, default is 1 for standalone, otherwise equal to the number of nodes. Minimum is 1
+    /// Number of shards in the collection, default is 1 for standalone, otherwise
+    /// equal to the number of nodes. Minimum is 1
     #[prost(uint32, optional, tag = "7")]
     pub shard_number: ::core::option::Option<u32>,
     /// If true - point's payload will not be stored in memory
     #[prost(bool, optional, tag = "8")]
     pub on_disk_payload: ::core::option::Option<bool>,
-    /// Wait timeout for operation commit in seconds, if not specified - default value will be supplied
+    /// Wait timeout for operation commit in seconds, if not specified - default
+    /// value will be supplied
     #[prost(uint64, optional, tag = "9")]
     pub timeout: ::core::option::Option<u64>,
     /// Configuration for vectors
@@ -960,10 +964,13 @@ pub struct UpdateCollection {
     /// Name of the collection
     #[prost(string, tag = "1")]
     pub collection_name: ::prost::alloc::string::String,
-    /// New configuration parameters for the collection. This operation is blocking, it will only proceed once all current optimizations are complete
+    /// New configuration parameters for the collection.
+    /// This operation is blocking, it will only proceed once all current
+    /// optimizations are complete
     #[prost(message, optional, tag = "2")]
     pub optimizers_config: ::core::option::Option<OptimizersConfigDiff>,
-    /// Wait timeout for operation commit in seconds if blocking, if not specified - default value will be supplied
+    /// Wait timeout for operation commit in seconds if blocking.
+    /// If not specified - default value will be supplied.
     #[prost(uint64, optional, tag = "3")]
     pub timeout: ::core::option::Option<u64>,
     /// New configuration parameters for the collection
@@ -984,7 +991,8 @@ pub struct UpdateCollection {
     /// New strict mode configuration
     #[prost(message, optional, tag = "9")]
     pub strict_mode_config: ::core::option::Option<StrictModeConfig>,
-    /// Arbitrary JSON-like metadata for the collection, will be merged with already stored metadata
+    /// Arbitrary JSON-like metadata for the collection, will be merged with
+    /// already stored metadata
     #[prost(map = "string, message", tag = "10")]
     pub metadata: ::std::collections::HashMap<::prost::alloc::string::String, Value>,
 }
@@ -993,7 +1001,8 @@ pub struct DeleteCollection {
     /// Name of the collection
     #[prost(string, tag = "1")]
     pub collection_name: ::prost::alloc::string::String,
-    /// Wait timeout for operation commit in seconds, if not specified - default value will be supplied
+    /// Wait timeout for operation commit in seconds.
+    /// If not specified - default value will be supplied.
     #[prost(uint64, optional, tag = "2")]
     pub timeout: ::core::option::Option<u64>,
 }
@@ -1032,6 +1041,9 @@ pub struct CollectionParams {
     /// Configuration for sparse vectors
     #[prost(message, optional, tag = "10")]
     pub sparse_vectors_config: ::core::option::Option<SparseVectorConfig>,
+    /// Define number of milliseconds to wait before attempting to read from another replica.
+    #[prost(uint64, optional, tag = "11")]
+    pub read_fan_out_delay_ms: ::core::option::Option<u64>,
 }
 #[derive(Clone, Copy, PartialEq, ::prost::Message)]
 pub struct CollectionParamsDiff {
@@ -1047,6 +1059,9 @@ pub struct CollectionParamsDiff {
     /// Fan-out every read request to these many additional remote nodes (and return first available response)
     #[prost(uint32, optional, tag = "4")]
     pub read_fan_out_factor: ::core::option::Option<u32>,
+    /// Define number of milliseconds to wait before attempting to read from another replica.
+    #[prost(uint64, optional, tag = "5")]
+    pub read_fan_out_delay_ms: ::core::option::Option<u64>,
 }
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct CollectionConfig {
@@ -1080,6 +1095,11 @@ pub struct KeywordIndexParams {
     /// If true - store index on disk.
#[prost(bool, optional, tag = "2")] pub on_disk: ::core::option::Option, + /// Enable HNSW graph building for this payload field. + /// If true, builds additional HNSW links (Need payload_m > 0). + /// Default: true. + #[prost(bool, optional, tag = "3")] + pub enable_hnsw: ::core::option::Option, } #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct IntegerIndexParams { @@ -1089,27 +1109,45 @@ pub struct IntegerIndexParams { /// If true - support ranges filters. Default is true. #[prost(bool, optional, tag = "2")] pub range: ::core::option::Option, - /// If true - use this key to organize storage of the collection data. This option assumes that this key will be used in majority of filtered requests. Default is false. + /// If true - use this key to organize storage of the collection data. + /// This option assumes that this key will be used in majority of filtered requests. + /// Default is false. #[prost(bool, optional, tag = "3")] pub is_principal: ::core::option::Option, /// If true - store index on disk. Default is false. #[prost(bool, optional, tag = "4")] pub on_disk: ::core::option::Option, + /// Enable HNSW graph building for this payload field. + /// If true, builds additional HNSW links (Need payload_m > 0). + /// Default: true. + #[prost(bool, optional, tag = "5")] + pub enable_hnsw: ::core::option::Option, } #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct FloatIndexParams { /// If true - store index on disk. #[prost(bool, optional, tag = "1")] pub on_disk: ::core::option::Option, - /// If true - use this key to organize storage of the collection data. This option assumes that this key will be used in majority of filtered requests. + /// If true - use this key to organize storage of the collection data. + /// This option assumes that this key will be used in majority of filtered requests. #[prost(bool, optional, tag = "2")] pub is_principal: ::core::option::Option, + /// Enable HNSW graph building for this payload field. + /// If true, builds additional HNSW links (Need payload_m > 0). + /// Default: true. + #[prost(bool, optional, tag = "3")] + pub enable_hnsw: ::core::option::Option, } #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct GeoIndexParams { /// If true - store index on disk. #[prost(bool, optional, tag = "1")] pub on_disk: ::core::option::Option, + /// Enable HNSW graph building for this payload field. + /// If true, builds additional HNSW links (Need payload_m > 0). + /// Default: true. + #[prost(bool, optional, tag = "2")] + pub enable_hnsw: ::core::option::Option, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct StopwordsSet { @@ -1146,9 +1184,15 @@ pub struct TextIndexParams { /// Set an algorithm for stemming. #[prost(message, optional, tag = "8")] pub stemmer: ::core::option::Option, - /// If true, normalize tokens by folding accented characters to ASCII (e.g., "ação" -> "acao"). Default: false. + /// If true, normalize tokens by folding accented characters to ASCII (e.g., "ação" -> "acao"). + /// Default: false. #[prost(bool, optional, tag = "9")] pub ascii_folding: ::core::option::Option, + /// Enable HNSW graph building for this payload field. + /// If true, builds additional HNSW links (Need payload_m > 0). + /// Default: true. + #[prost(bool, optional, tag = "10")] + pub enable_hnsw: ::core::option::Option, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct StemmingAlgorithm { @@ -1175,15 +1219,26 @@ pub struct BoolIndexParams { /// If true - store index on disk. 
#[prost(bool, optional, tag = "1")] pub on_disk: ::core::option::Option, + /// Enable HNSW graph building for this payload field. + /// If true, builds additional HNSW links (Need payload_m > 0). + /// Default: true. + #[prost(bool, optional, tag = "2")] + pub enable_hnsw: ::core::option::Option, } #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct DatetimeIndexParams { /// If true - store index on disk. #[prost(bool, optional, tag = "1")] pub on_disk: ::core::option::Option, - /// If true - use this key to organize storage of the collection data. This option assumes that this key will be used in majority of filtered requests. + /// If true - use this key to organize storage of the collection data. + /// This option assumes that this key will be used in majority of filtered requests. #[prost(bool, optional, tag = "2")] pub is_principal: ::core::option::Option, + /// Enable HNSW graph building for this payload field. + /// If true, builds additional HNSW links (Need payload_m > 0). + /// Default: true. + #[prost(bool, optional, tag = "3")] + pub enable_hnsw: ::core::option::Option, } #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct UuidIndexParams { @@ -1193,6 +1248,11 @@ pub struct UuidIndexParams { /// If true - store index on disk. #[prost(bool, optional, tag = "2")] pub on_disk: ::core::option::Option, + /// Enable HNSW graph building for this payload field. + /// If true, builds additional HNSW links (Need payload_m > 0). + /// Default: true. + #[prost(bool, optional, tag = "3")] + pub enable_hnsw: ::core::option::Option, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct PayloadIndexParams { @@ -1240,10 +1300,16 @@ pub struct PayloadSchemaInfo { /// Field index parameters #[prost(message, optional, tag = "2")] pub params: ::core::option::Option, - /// Number of points indexed within this field indexed + /// Number of points indexed within this field #[prost(uint64, optional, tag = "3")] pub points: ::core::option::Option, } +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct UpdateQueueInfo { + /// Number of elements in the queue + #[prost(uint64, tag = "1")] + pub length: u64, +} #[derive(Clone, PartialEq, ::prost::Message)] pub struct CollectionInfo { /// operating condition of the collection @@ -1273,13 +1339,17 @@ pub struct CollectionInfo { /// Warnings related to the collection #[prost(message, repeated, tag = "11")] pub warnings: ::prost::alloc::vec::Vec, + /// Update queue info + #[prost(message, optional, tag = "12")] + pub update_queue: ::core::option::Option, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct ChangeAliases { /// List of actions #[prost(message, repeated, tag = "1")] pub actions: ::prost::alloc::vec::Vec, - /// Wait timeout for operation commit in seconds, if not specified - default value will be supplied + /// Wait timeout for operation commit in seconds. + /// If not specified - default value will be supplied. 
#[prost(uint64, optional, tag = "2")] pub timeout: ::core::option::Option, } @@ -1413,7 +1483,8 @@ pub struct ShardTransferInfo { pub from: u64, #[prost(uint64, tag = "3")] pub to: u64, - /// If `true` transfer is a synchronization of a replicas; If `false` transfer is a moving of a shard from one peer to another + /// If `true` transfer is a synchronization of a replicas; + /// If `false` transfer is a moving of a shard from one peer to another #[prost(bool, tag = "4")] pub sync: bool, } @@ -1551,7 +1622,8 @@ pub struct UpdateCollectionClusterSetupRequest { /// Name of the collection #[prost(string, tag = "1")] pub collection_name: ::prost::alloc::string::String, - /// Wait timeout for operation commit in seconds, if not specified - default value will be supplied + /// Wait timeout for operation commit in seconds. + /// If not specified - default value will be supplied. #[prost(uint64, optional, tag = "6")] pub timeout: ::core::option::Option, #[prost( @@ -1597,7 +1669,8 @@ pub struct CreateShardKeyRequest { /// Request to create shard key #[prost(message, optional, tag = "2")] pub request: ::core::option::Option, - /// Wait timeout for operation commit in seconds, if not specified - default value will be supplied + /// Wait timeout for operation commit in seconds. + /// If not specified - default value will be supplied. #[prost(uint64, optional, tag = "3")] pub timeout: ::core::option::Option, } @@ -1609,10 +1682,17 @@ pub struct DeleteShardKeyRequest { /// Request to delete shard key #[prost(message, optional, tag = "2")] pub request: ::core::option::Option, - /// Wait timeout for operation commit in seconds, if not specified - default value will be supplied + /// Wait timeout for operation commit in seconds. + /// If not specified - default value will be supplied. #[prost(uint64, optional, tag = "3")] pub timeout: ::core::option::Option, } +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListShardKeysRequest { + /// Name of the collection + #[prost(string, tag = "1")] + pub collection_name: ::prost::alloc::string::String, +} #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct CreateShardKeyResponse { #[prost(bool, tag = "1")] @@ -1623,6 +1703,19 @@ pub struct DeleteShardKeyResponse { #[prost(bool, tag = "1")] pub result: bool, } +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ShardKeyDescription { + #[prost(message, optional, tag = "1")] + pub key: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListShardKeysResponse { + #[prost(message, repeated, tag = "1")] + pub shard_keys: ::prost::alloc::vec::Vec, + /// Time spent to process + #[prost(double, tag = "2")] + pub time: f64, +} #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum Datatype { @@ -1990,11 +2083,14 @@ pub enum ReplicaState { Partial = 2, /// Collection is being created Initializing = 3, - /// A shard which receives data, but is not used for search; Useful for backup shards + /// A shard which receives data, but is not used for search. + /// Useful for backup shards. Listener = 4, - /// Deprecated: snapshot shard transfer is in progress; Updates should not be sent to (and are ignored by) the shard + /// Deprecated: snapshot shard transfer is in progress. + /// Updates should not be sent to (and are ignored by) the shard. 
@@ -1990,11 +2083,14 @@ pub enum ReplicaState {
     Partial = 2,
     /// Collection is being created
     Initializing = 3,
-    /// A shard which receives data, but is not used for search; Useful for backup shards
+    /// A shard which receives data, but is not used for search.
+    /// Useful for backup shards.
     Listener = 4,
-    /// Deprecated: snapshot shard transfer is in progress; Updates should not be sent to (and are ignored by) the shard
+    /// Deprecated: snapshot shard transfer is in progress.
+    /// Updates should not be sent to (and are ignored by) the shard.
     PartialSnapshot = 5,
-    /// Shard is undergoing recovered by an external node; Normally rejects updates, accepts updates if force is true
+    /// Shard is undergoing recovery by an external node.
+    /// Normally rejects updates, accepts updates if force is true.
     Recovery = 6,
     /// Points are being migrated to this shard as part of scale-up resharding
     Resharding = 7,
@@ -2002,6 +2098,12 @@ pub enum ReplicaState {
     ReshardingScaleDown = 8,
     /// Active for readers, Partial for writers
     ActiveRead = 9,
+    /// State for manually creation/recovery of a shard.
+    /// Usually when snapshot is uploaded.
+    /// This state is equivalent to `Partial`, except:
+    /// - it can't receive updates
+    /// - it is not treated as broken on startup
+    ManualRecovery = 10,
 }
 impl ReplicaState {
     /// String value of the enum field names used in the ProtoBuf definition.
@@ -2020,6 +2122,7 @@ impl ReplicaState {
             Self::Resharding => "Resharding",
             Self::ReshardingScaleDown => "ReshardingScaleDown",
             Self::ActiveRead => "ActiveRead",
+            Self::ManualRecovery => "ManualRecovery",
         }
     }
     /// Creates an enum from field names used in the ProtoBuf definition.
@@ -2035,11 +2138,11 @@ impl ReplicaState {
             "Resharding" => Some(Self::Resharding),
             "ReshardingScaleDown" => Some(Self::ReshardingScaleDown),
             "ActiveRead" => Some(Self::ActiveRead),
+            "ManualRecovery" => Some(Self::ManualRecovery),
             _ => None,
         }
     }
 }
-///
 /// Resharding direction, scale up or down in number of shards
 #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
 #[repr(i32)]
 pub enum ReshardingDirection {
@@ -2196,7 +2299,6 @@ pub mod collections_client {
             self.inner = self.inner.max_encoding_message_size(limit);
             self
         }
-        ///
        /// Get detailed information about specified existing collection
         pub async fn get(
             &mut self,
@@ -2219,8 +2321,7 @@ pub mod collections_client {
             req.extensions_mut().insert(GrpcMethod::new("qdrant.Collections", "Get"));
             self.inner.unary(req, path, codec).await
         }
-        ///
-        /// Get list name of all existing collections
+        /// Get list of names of all existing collections
        pub async fn list(
             &mut self,
@@ -2242,7 +2343,6 @@ pub mod collections_client {
             req.extensions_mut().insert(GrpcMethod::new("qdrant.Collections", "List"));
             self.inner.unary(req, path, codec).await
         }
-        ///
         /// Create new collection with given parameters
         pub async fn create(
             &mut self,
@@ -2267,7 +2367,6 @@ pub mod collections_client {
             req.extensions_mut().insert(GrpcMethod::new("qdrant.Collections", "Create"));
             self.inner.unary(req, path, codec).await
         }
-        ///
         /// Update parameters of the existing collection
         pub async fn update(
             &mut self,
@@ -2292,7 +2391,6 @@ pub mod collections_client {
             req.extensions_mut().insert(GrpcMethod::new("qdrant.Collections", "Update"));
             self.inner.unary(req, path, codec).await
         }
-        ///
         /// Drop collection and all associated data
         pub async fn delete(
             &mut self,
@@ -2317,7 +2415,6 @@ pub mod collections_client {
             req.extensions_mut().insert(GrpcMethod::new("qdrant.Collections", "Delete"));
             self.inner.unary(req, path, codec).await
         }
-        ///
         /// Update Aliases of the existing collection
         pub async fn update_aliases(
             &mut self,
@@ -2343,7 +2440,6 @@ pub mod collections_client {
             req.extensions_mut()
                 .insert(GrpcMethod::new("qdrant.Collections", "UpdateAliases"));
             self.inner.unary(req, path, codec).await
         }
-        ///
         /// Get list of all aliases for a collection
         pub async fn list_collection_aliases(
             &mut self,
@@ -2369,7 +2465,6 @@ pub mod collections_client {
             req.extensions_mut()
                 .insert(GrpcMethod::new("qdrant.Collections", "ListCollectionAliases"));
             self.inner.unary(req, path, codec).await
codec).await } - /// /// Get list of all aliases for all existing collections pub async fn list_aliases( &mut self, @@ -2395,7 +2490,6 @@ pub mod collections_client { .insert(GrpcMethod::new("qdrant.Collections", "ListAliases")); self.inner.unary(req, path, codec).await } - /// /// Get cluster information for a collection pub async fn collection_cluster_info( &mut self, @@ -2421,7 +2515,6 @@ pub mod collections_client { .insert(GrpcMethod::new("qdrant.Collections", "CollectionClusterInfo")); self.inner.unary(req, path, codec).await } - /// /// Check the existence of a collection pub async fn collection_exists( &mut self, @@ -2447,7 +2540,6 @@ pub mod collections_client { .insert(GrpcMethod::new("qdrant.Collections", "CollectionExists")); self.inner.unary(req, path, codec).await } - /// /// Update cluster setup for a collection pub async fn update_collection_cluster_setup( &mut self, @@ -2475,7 +2567,6 @@ pub mod collections_client { ); self.inner.unary(req, path, codec).await } - /// /// Create shard key pub async fn create_shard_key( &mut self, @@ -2501,7 +2592,6 @@ pub mod collections_client { .insert(GrpcMethod::new("qdrant.Collections", "CreateShardKey")); self.inner.unary(req, path, codec).await } - /// /// Delete shard key pub async fn delete_shard_key( &mut self, @@ -2527,6 +2617,31 @@ pub mod collections_client { .insert(GrpcMethod::new("qdrant.Collections", "DeleteShardKey")); self.inner.unary(req, path, codec).await } + /// List shard keys + pub async fn list_shard_keys( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/qdrant.Collections/ListShardKeys", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("qdrant.Collections", "ListShardKeys")); + self.inner.unary(req, path, codec).await + } } } /// Generated server implementations. @@ -2542,7 +2657,6 @@ pub mod collections_server { /// Generated trait containing gRPC methods that should be implemented for use with CollectionsServer. 
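// Illustrative sketch (not part of the generated sources): calling the new
// `ListShardKeys` RPC through the generated collections client. A connected
// client and an async context are assumed; the request and response types are
// the prost messages defined above.
async fn list_shard_keys_sketch(
    client: &mut collections_client::CollectionsClient<tonic::transport::Channel>,
) -> Result<(), tonic::Status> {
    let response = client
        .list_shard_keys(ListShardKeysRequest {
            collection_name: "my_collection".to_string(),
        })
        .await?
        .into_inner();
    // Each `ShardKeyDescription` wraps an optional `ShardKey`.
    for description in response.shard_keys {
        println!("shard key: {:?}", description.key);
    }
    Ok(())
}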
#[async_trait] pub trait Collections: std::marker::Send + std::marker::Sync + 'static { - /// /// Get detailed information about specified existing collection async fn get( &self, @@ -2551,8 +2665,7 @@ pub mod collections_server { tonic::Response, tonic::Status, >; - /// - /// Get list name of all existing collections + /// Get list of names of all existing collections async fn list( &self, request: tonic::Request, @@ -2560,7 +2673,6 @@ pub mod collections_server { tonic::Response, tonic::Status, >; - /// /// Create new collection with given parameters async fn create( &self, @@ -2569,7 +2681,6 @@ pub mod collections_server { tonic::Response, tonic::Status, >; - /// /// Update parameters of the existing collection async fn update( &self, @@ -2578,7 +2689,6 @@ pub mod collections_server { tonic::Response, tonic::Status, >; - /// /// Drop collection and all associated data async fn delete( &self, @@ -2587,7 +2697,6 @@ pub mod collections_server { tonic::Response, tonic::Status, >; - /// /// Update Aliases of the existing collection async fn update_aliases( &self, @@ -2596,7 +2705,6 @@ pub mod collections_server { tonic::Response, tonic::Status, >; - /// /// Get list of all aliases for a collection async fn list_collection_aliases( &self, @@ -2605,7 +2713,6 @@ pub mod collections_server { tonic::Response, tonic::Status, >; - /// /// Get list of all aliases for all existing collections async fn list_aliases( &self, @@ -2614,7 +2721,6 @@ pub mod collections_server { tonic::Response, tonic::Status, >; - /// /// Get cluster information for a collection async fn collection_cluster_info( &self, @@ -2623,7 +2729,6 @@ pub mod collections_server { tonic::Response, tonic::Status, >; - /// /// Check the existence of a collection async fn collection_exists( &self, @@ -2632,7 +2737,6 @@ pub mod collections_server { tonic::Response, tonic::Status, >; - /// /// Update cluster setup for a collection async fn update_collection_cluster_setup( &self, @@ -2641,7 +2745,6 @@ pub mod collections_server { tonic::Response, tonic::Status, >; - /// /// Create shard key async fn create_shard_key( &self, @@ -2650,7 +2753,6 @@ pub mod collections_server { tonic::Response, tonic::Status, >; - /// /// Delete shard key async fn delete_shard_key( &self, @@ -2659,6 +2761,14 @@ pub mod collections_server { tonic::Response, tonic::Status, >; + /// List shard keys + async fn list_shard_keys( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; } #[derive(Debug)] pub struct CollectionsServer { @@ -3330,6 +3440,51 @@ pub mod collections_server { }; Box::pin(fut) } + "/qdrant.Collections/ListShardKeys" => { + #[allow(non_camel_case_types)] + struct ListShardKeysSvc(pub Arc); + impl< + T: Collections, + > tonic::server::UnaryService + for ListShardKeysSvc { + type Response = super::ListShardKeysResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_shard_keys(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = ListShardKeysSvc(inner); + let codec = 
tonic::codec::ProstCodec::default();
+                    let mut grpc = tonic::server::Grpc::new(codec)
+                        .apply_compression_config(
+                            accept_compression_encodings,
+                            send_compression_encodings,
+                        )
+                        .apply_max_message_size_config(
+                            max_decoding_message_size,
+                            max_encoding_message_size,
+                        );
+                    let res = grpc.unary(method, req).await;
+                    Ok(res)
+                };
+                Box::pin(fut)
+            }
            _ => {
                Box::pin(async move {
                    let mut response = http::Response::new(empty_body());
@@ -3386,7 +3541,8 @@ pub mod read_consistency {
    /// Common read consistency configurations
    #[prost(enumeration = "super::ReadConsistencyType", tag = "1")]
    Type(i32),
-    /// Send request to a specified number of nodes, and return points which are present on all of them
+    /// Send request to a specified number of nodes,
+    /// and return points which are present on all of them
    #[prost(uint64, tag = "2")]
    Factor(u64),
}
@@ -3437,7 +3593,9 @@ pub struct Vector {
    /// Vector data (flatten for multi vectors), deprecated
    #[deprecated]
    #[prost(float, repeated, packed = "false", tag = "1")]
+    /// Deprecated since 1.16.0,
+    /// use [`vector`](crate::qdrant::Vector::vector) field instead.
    pub data: ::prost::alloc::vec::Vec,
    /// Sparse indices for sparse vectors, deprecated
    #[deprecated]
@@ -3535,7 +3693,8 @@ pub struct MultiDenseVector {
    #[prost(message, repeated, tag = "1")]
    pub vectors: ::prost::alloc::vec::Vec,
}
-/// Vector type to be used in queries. Ids will be substituted with their corresponding vectors from the collection.
+/// Vector type to be used in queries.
+/// Ids will be substituted with their corresponding vectors from the collection.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct VectorInput {
    #[prost(oneof = "vector_input::Variant", tags = "1, 2, 3, 4, 5, 6, 7")]
    pub variant: ::core::option::Option,
}
@@ -3585,9 +3744,16 @@ pub struct UpsertPoints {
    /// Option for custom sharding to specify used shard keys
    #[prost(message, optional, tag = "5")]
    pub shard_key_selector: ::core::option::Option,
-    /// If specified, only points that match this filter will be updated, others will be inserted
+    /// Filter to apply when updating existing points. Only points matching this filter will be updated.
+    /// Points that don't match will keep their current state. New points will be inserted regardless of the filter.
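// Illustrative sketch of the `update_filter` semantics documented above, via
// the crate's high-level client. The `update_filter` setter on the generated
// `UpsertPointsBuilder` is an assumption; `client` and `points` are placeholders.
async fn conditional_upsert_sketch(
    client: &qdrant_client::Qdrant,
    points: Vec<qdrant_client::qdrant::PointStruct>,
) -> Result<(), qdrant_client::QdrantError> {
    use qdrant_client::qdrant::{Condition, Filter, UpsertPointsBuilder};
    client
        .upsert_points(
            UpsertPointsBuilder::new("my_collection", points)
                // Existing points are overwritten only when they match this
                // filter; non-matching points keep their current state, while
                // brand-new IDs are inserted regardless of the filter.
                .update_filter(Filter::all([Condition::matches("version", 2)]))
                .wait(true),
        )
        .await?;
    Ok(())
}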
#[prost(message, optional, tag = "6")] pub update_filter: ::core::option::Option, + /// Timeout for the request in seconds + #[prost(uint64, optional, tag = "7")] + pub timeout: ::core::option::Option, + /// Mode of the upsert operation: insert_only, upsert (default), update_only + #[prost(enumeration = "UpdateMode", optional, tag = "8")] + pub update_mode: ::core::option::Option, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeletePoints { @@ -3606,6 +3772,9 @@ pub struct DeletePoints { /// Option for custom sharding to specify used shard keys #[prost(message, optional, tag = "5")] pub shard_key_selector: ::core::option::Option, + /// Timeout for the request in seconds + #[prost(uint64, optional, tag = "6")] + pub timeout: ::core::option::Option, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetPoints { @@ -3651,6 +3820,9 @@ pub struct UpdatePointVectors { /// If specified, only points that match this filter will be updated #[prost(message, optional, tag = "6")] pub update_filter: ::core::option::Option, + /// Timeout for the request in seconds + #[prost(uint64, optional, tag = "7")] + pub timeout: ::core::option::Option, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct PointVectors { @@ -3681,6 +3853,9 @@ pub struct DeletePointVectors { /// Option for custom sharding to specify used shard keys #[prost(message, optional, tag = "6")] pub shard_key_selector: ::core::option::Option, + /// Timeout for the request in seconds + #[prost(uint64, optional, tag = "7")] + pub timeout: ::core::option::Option, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct SetPayloadPoints { @@ -3705,6 +3880,9 @@ pub struct SetPayloadPoints { /// Option for indicate property of payload #[prost(string, optional, tag = "8")] pub key: ::core::option::Option<::prost::alloc::string::String>, + /// Timeout for the request in seconds + #[prost(uint64, optional, tag = "9")] + pub timeout: ::core::option::Option, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeletePayloadPoints { @@ -3726,6 +3904,9 @@ pub struct DeletePayloadPoints { /// Option for custom sharding to specify used shard keys #[prost(message, optional, tag = "7")] pub shard_key_selector: ::core::option::Option, + /// Timeout for the request in seconds + #[prost(uint64, optional, tag = "8")] + pub timeout: ::core::option::Option, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct ClearPayloadPoints { @@ -3744,6 +3925,9 @@ pub struct ClearPayloadPoints { /// Option for custom sharding to specify used shard keys #[prost(message, optional, tag = "5")] pub shard_key_selector: ::core::option::Option, + /// Timeout for the request in seconds + #[prost(uint64, optional, tag = "6")] + pub timeout: ::core::option::Option, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct CreateFieldIndexCollection { @@ -3765,6 +3949,9 @@ pub struct CreateFieldIndexCollection { /// Write ordering guarantees #[prost(message, optional, tag = "6")] pub ordering: ::core::option::Option, + /// Timeout for the request in seconds + #[prost(uint64, optional, tag = "7")] + pub timeout: ::core::option::Option, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeleteFieldIndexCollection { @@ -3780,6 +3967,9 @@ pub struct DeleteFieldIndexCollection { /// Write ordering guarantees #[prost(message, optional, tag = "4")] pub ordering: ::core::option::Option, + /// Timeout for the request in seconds + #[prost(uint64, optional, tag = "5")] + pub timeout: ::core::option::Option, } #[derive(Clone, PartialEq, 
::prost::Message)]
pub struct PayloadIncludeSelector {
@@ -3872,35 +4062,33 @@ pub mod with_vectors_selector {
    /// If `true` - return all vectors, if `false` - none
    #[prost(bool, tag = "1")]
    Enable(bool),
-    /// List of payload keys to include into result
+    /// List of vectors to include into result
    #[prost(message, tag = "2")]
    Include(super::VectorsSelector),
    }
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct QuantizationSearchParams {
-    ///
    /// If set to true, search will ignore quantized vector data
    #[prost(bool, optional, tag = "1")]
    pub ignore: ::core::option::Option,
-    ///
-    /// If true, use original vectors to re-score top-k results. If ignored, qdrant decides automatically does rescore enabled or not.
+    /// If true, use original vectors to re-score top-k results.
+    /// If ignored, Qdrant automatically decides whether to rescore or not.
    #[prost(bool, optional, tag = "2")]
    pub rescore: ::core::option::Option,
-    ///
    /// Oversampling factor for quantization.
    ///
    /// Defines how many extra vectors should be pre-selected using quantized index,
    /// and then re-scored using original vectors.
    ///
-    /// For example, if `oversampling` is 2.4 and `limit` is 100, then 240 vectors will be pre-selected using quantized index,
+    /// For example, if `oversampling` is 2.4 and `limit` is 100,
+    /// then 240 vectors will be pre-selected using quantized index,
    /// and then top-100 will be returned after re-scoring.
    #[prost(double, optional, tag = "3")]
    pub oversampling: ::core::option::Option,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct AcornSearchParams {
-    ///
    /// If true, then ACORN may be used for the HNSW search based on filters
    /// selectivity.
    ///
@@ -3908,7 +4096,6 @@ pub struct AcornSearchParams {
    /// payload filters, at cost of performance.
    #[prost(bool, optional, tag = "1")]
    pub enable: ::core::option::Option,
-    ///
    /// Maximum selectivity of filters to enable ACORN.
    ///
    /// If estimated filters selectivity is higher than this value,
@@ -3921,26 +4108,21 @@
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct SearchParams {
-    ///
    /// Params relevant to HNSW index. Size of the beam in a beam-search.
    /// Larger the value - more accurate the result, more time required for search.
    #[prost(uint64, optional, tag = "1")]
    pub hnsw_ef: ::core::option::Option,
-    ///
    /// Search without approximation. If set to true, search may run long but with exact results.
    #[prost(bool, optional, tag = "2")]
    pub exact: ::core::option::Option,
-    ///
    /// If set to true, search will ignore quantized vector data
    #[prost(message, optional, tag = "3")]
    pub quantization: ::core::option::Option,
-    ///
    /// If enabled, the engine will only perform search among indexed or small segments.
    /// Using this option prevents slow searches in case of delayed index, but does not
    /// guarantee that all uploaded vectors will be included in search results
    #[prost(bool, optional, tag = "4")]
    pub indexed_only: ::core::option::Option,
-    ///
    /// ACORN search params
    #[prost(message, optional, tag = "5")]
    pub acorn: ::core::option::Option,
@@ -4044,7 +4226,9 @@ pub struct SearchPointGroups {
    /// Options for specifying which vectors to include into response
    #[prost(message, optional, tag = "9")]
    pub with_vectors: ::core::option::Option,
-    /// Payload field to group by, must be a string or number field. If there are multiple values for the field, all of them will be used. One point can be in multiple groups.
+ /// Payload field to group by, must be a string or number field. + /// If there are multiple values for the field, all of them will be used. + /// One point can be in multiple groups. #[prost(string, tag = "10")] pub group_by: ::prost::alloc::string::String, /// Maximum amount of points to return per group @@ -4245,7 +4429,9 @@ pub struct RecommendPointGroups { /// Name of the collection to use for points lookup, if not specified - use current collection #[prost(message, optional, tag = "11")] pub lookup_from: ::core::option::Option, - /// Payload field to group by, must be a string or number field. If there are multiple values for the field, all of them will be used. One point can be in multiple groups. + /// Payload field to group by, must be a string or number field. + /// If there are multiple values for the field, all of them will be used. + /// One point can be in multiple groups. #[prost(string, tag = "12")] pub group_by: ::prost::alloc::string::String, /// Maximum amount of points to return per group @@ -4425,6 +4611,50 @@ pub struct ContextInput { pub pairs: ::prost::alloc::vec::Vec, } #[derive(Clone, PartialEq, ::prost::Message)] +pub struct RelevanceFeedbackInput { + /// The original query vector + #[prost(message, optional, tag = "1")] + pub target: ::core::option::Option, + /// Previous results scored by the feedback provider. + #[prost(message, repeated, tag = "2")] + pub feedback: ::prost::alloc::vec::Vec, + /// Formula and trained coefficients to use. + #[prost(message, optional, tag = "3")] + pub strategy: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct FeedbackItem { + /// The id or vector from the original model + #[prost(message, optional, tag = "1")] + pub example: ::core::option::Option, + /// Score for this vector as determined by the feedback provider + #[prost(float, tag = "2")] + pub score: f32, +} +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct FeedbackStrategy { + #[prost(oneof = "feedback_strategy::Variant", tags = "1")] + pub variant: ::core::option::Option, +} +/// Nested message and enum types in `FeedbackStrategy`. +pub mod feedback_strategy { + #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] + pub enum Variant { + /// a * score + sim(confidence^b * c * delta) + #[prost(message, tag = "1")] + Naive(super::NaiveFeedbackStrategy), + } +} +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct NaiveFeedbackStrategy { + #[prost(float, tag = "1")] + pub a: f32, + #[prost(float, tag = "2")] + pub b: f32, + #[prost(float, tag = "3")] + pub c: f32, +} +#[derive(Clone, PartialEq, ::prost::Message)] pub struct Formula { #[prost(message, optional, tag = "1")] pub expression: ::core::option::Option, @@ -4542,10 +4772,13 @@ pub struct DecayParamsExpression { /// The target value to start decaying from. Defaults to 0. #[prost(message, optional, boxed, tag = "2")] pub target: ::core::option::Option<::prost::alloc::boxed::Box>, - /// The scale factor of the decay, in terms of `x`. Defaults to 1.0. Must be a non-zero positive number. + /// The scale factor of the decay, in terms of `x`. + /// Defaults to 1.0. Must be a non-zero positive number. #[prost(float, optional, tag = "3")] pub scale: ::core::option::Option, - /// The midpoint of the decay. Should be between 0 and 1. Defaults to 0.5. Output will be this value when `|x - target| == scale`. + /// The midpoint of the decay. + /// Should be between 0 and 1. Defaults to 0.5. + /// Output will be this value when `|x - target| == scale`. 
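// Illustrative sketch: assembling the new relevance-feedback messages directly
// from the prost definitions above (the snippets at the end of this diff show a
// builder-based equivalent). The `From<Vec<f32>>` conversion for `VectorInput`
// is assumed from the crate's conversion helpers; ids, scores, and coefficients
// are placeholders.
fn relevance_feedback_input_sketch() -> RelevanceFeedbackInput {
    RelevanceFeedbackInput {
        // The original query vector.
        target: Some(vec![0.01, 0.45, 0.67].into()),
        // Previous results, scored by the feedback provider.
        feedback: vec![
            FeedbackItem {
                example: Some(VectorInput::new_id(PointId::from(42))),
                score: 0.9,
            },
            FeedbackItem {
                example: Some(VectorInput::new_id(PointId::from(7))),
                score: 0.1,
            },
        ],
        // Naive strategy: a * score + sim(confidence^b * c * delta).
        strategy: Some(FeedbackStrategy {
            variant: Some(feedback_strategy::Variant::Naive(NaiveFeedbackStrategy {
                a: 1.0,
                b: 1.0,
                c: 1.0,
            })),
        }),
    }
}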
#[prost(float, optional, tag = "4")] pub midpoint: ::core::option::Option, } @@ -4579,15 +4812,21 @@ pub struct Mmr { pub candidates_limit: ::core::option::Option, } /// Parameterized reciprocal rank fusion -#[derive(Clone, Copy, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, ::prost::Message)] pub struct Rrf { /// K parameter for reciprocal rank fusion #[prost(uint32, optional, tag = "1")] pub k: ::core::option::Option, + /// Weights for each prefetch source. + /// Higher weight gives more influence on the final ranking. + /// If not specified, all prefetches are weighted equally. + /// The number of weights should match the number of prefetches. + #[prost(float, repeated, tag = "2")] + pub weights: ::prost::alloc::vec::Vec, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct Query { - #[prost(oneof = "query::Variant", tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10")] + #[prost(oneof = "query::Variant", tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11")] pub variant: ::core::option::Option, } /// Nested message and enum types in `Query`. @@ -4624,17 +4863,23 @@ pub mod query { /// Parameterized reciprocal rank fusion #[prost(message, tag = "10")] Rrf(super::Rrf), + /// Search with feedback from some oracle. + #[prost(message, tag = "11")] + RelevanceFeedback(super::RelevanceFeedbackInput), } } #[derive(Clone, PartialEq, ::prost::Message)] pub struct PrefetchQuery { - /// Sub-requests to perform first. If present, the query will be performed on the results of the prefetches. + /// Sub-requests to perform first. + /// If present, the query will be performed on the results of the prefetches. #[prost(message, repeated, tag = "1")] pub prefetch: ::prost::alloc::vec::Vec, - /// Query to perform. If missing, returns points ordered by their IDs. + /// Query to perform. + /// If missing, returns points ordered by their IDs. #[prost(message, optional, tag = "2")] pub query: ::core::option::Option, - /// Define which vector to use for querying. If missing, the default vector is is used. + /// Define which vector to use for querying. + /// If missing, the default vector is used. #[prost(string, optional, tag = "3")] pub using: ::core::option::Option<::prost::alloc::string::String>, /// Filter conditions - return only those points that satisfy the specified conditions. @@ -4649,7 +4894,8 @@ pub struct PrefetchQuery { /// Max number of points. Default is 10 #[prost(uint64, optional, tag = "7")] pub limit: ::core::option::Option, - /// The location to use for IDs lookup, if not specified - use the current collection and the 'using' vector + /// The location to use for IDs lookup. + /// If not specified - use the current collection and the 'using' vector. #[prost(message, optional, tag = "8")] pub lookup_from: ::core::option::Option, } @@ -4658,13 +4904,15 @@ pub struct QueryPoints { /// Name of the collection #[prost(string, tag = "1")] pub collection_name: ::prost::alloc::string::String, - /// Sub-requests to perform first. If present, the query will be performed on the results of the prefetches. + /// Sub-requests to perform first. + /// If present, the query will be performed on the results of the prefetches. #[prost(message, repeated, tag = "2")] pub prefetch: ::prost::alloc::vec::Vec, /// Query to perform. If missing, returns points ordered by their IDs. #[prost(message, optional, tag = "3")] pub query: ::core::option::Option, - /// Define which vector to use for querying. If missing, the default vector is used. + /// Define which vector to use for querying. 
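// Illustrative sketch of the new per-prefetch RRF weights, constructing the
// `Rrf` and `Query` messages directly from the definitions above; collection
// and vector names are placeholders, and `client` is an assumed `Qdrant` handle.
async fn weighted_rrf_sketch(
    client: &qdrant_client::Qdrant,
) -> Result<(), qdrant_client::QdrantError> {
    use qdrant_client::qdrant::{
        query, PrefetchQueryBuilder, Query, QueryPointsBuilder, Rrf,
    };
    let weighted_rrf = Query {
        variant: Some(query::Variant::Rrf(Rrf {
            k: Some(60),
            // One weight per prefetch below, in order: hits from the first
            // prefetch count twice as much in the fused ranking.
            weights: vec![2.0, 1.0],
        })),
    };
    let _response = client
        .query(
            QueryPointsBuilder::new("my_collection")
                .add_prefetch(
                    PrefetchQueryBuilder::default()
                        .query(Query::new_nearest(vec![0.2, 0.8, 0.5, 0.1]))
                        .using("title_vector")
                        .limit(50u64),
                )
                .add_prefetch(
                    PrefetchQueryBuilder::default()
                        .query(Query::new_nearest(vec![0.9, 0.1, 0.3, 0.7]))
                        .using("body_vector")
                        .limit(50u64),
                )
                .query(weighted_rrf),
        )
        .await?;
    Ok(())
}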
+ /// If missing, the default vector is used. #[prost(string, optional, tag = "4")] pub using: ::core::option::Option<::prost::alloc::string::String>, /// Filter conditions - return only those points that satisfy the specified conditions. @@ -4691,10 +4939,12 @@ pub struct QueryPoints { /// Options for specifying read consistency guarantees. #[prost(message, optional, tag = "12")] pub read_consistency: ::core::option::Option, - /// Specify in which shards to look for the points, if not specified - look in all shards. + /// Specify in which shards to look for the points. + /// If not specified - look in all shards. #[prost(message, optional, tag = "13")] pub shard_key_selector: ::core::option::Option, - /// The location to use for IDs lookup, if not specified - use the current collection and the 'using' vector + /// The location to use for IDs lookup. + /// If not specified - use the current collection and the 'using' vector. #[prost(message, optional, tag = "14")] pub lookup_from: ::core::option::Option, /// If set, overrides global timeout setting for this request. Unit is seconds. @@ -4719,13 +4969,15 @@ pub struct QueryPointGroups { /// Name of the collection #[prost(string, tag = "1")] pub collection_name: ::prost::alloc::string::String, - /// Sub-requests to perform first. If present, the query will be performed on the results of the prefetches. + /// Sub-requests to perform first. + /// If present, the query will be performed on the results of the prefetches. #[prost(message, repeated, tag = "2")] pub prefetch: ::prost::alloc::vec::Vec, /// Query to perform. If missing, returns points ordered by their IDs. #[prost(message, optional, tag = "3")] pub query: ::core::option::Option, - /// Define which vector to use for querying. If missing, the default vector is used. + /// Define which vector to use for querying. + /// If missing, the default vector is used. #[prost(string, optional, tag = "4")] pub using: ::core::option::Option<::prost::alloc::string::String>, /// Filter conditions - return only those points that satisfy the specified conditions. @@ -4743,16 +4995,19 @@ pub struct QueryPointGroups { /// Options for specifying which vectors to include into response #[prost(message, optional, tag = "9")] pub with_vectors: ::core::option::Option, - /// The location to use for IDs lookup, if not specified - use the current collection and the 'using' vector + /// The location to use for IDs lookup. + /// If not specified - use the current collection and the 'using' vector. #[prost(message, optional, tag = "10")] pub lookup_from: ::core::option::Option, /// Max number of points. Default is 3. #[prost(uint64, optional, tag = "11")] pub limit: ::core::option::Option, - /// Maximum amount of points to return per group. Default to 10. + /// Maximum amount of points to return per group. Defaults to 10. #[prost(uint64, optional, tag = "12")] pub group_size: ::core::option::Option, - /// Payload field to group by, must be a string or number field. If there are multiple values for the field, all of them will be used. One point can be in multiple groups. + /// Payload field to group by, must be a string or number field. + /// If there are multiple values for the field, all of them will be used. + /// One point can be in multiple groups. #[prost(string, tag = "13")] pub group_by: ::prost::alloc::string::String, /// Options for specifying read consistency guarantees @@ -4838,7 +5093,7 @@ pub struct SearchMatrixPoints { /// How many neighbours per sample to find. Default is 3. 
#[prost(uint64, optional, tag = "4")] pub limit: ::core::option::Option, - /// Define which vector to use for querying. If missing, the default vector is is used. + /// Define which vector to use for querying. If missing, the default vector is used. #[prost(string, optional, tag = "5")] pub using: ::core::option::Option<::prost::alloc::string::String>, /// If set, overrides global timeout setting for this request. Unit is seconds. @@ -4901,9 +5156,13 @@ pub mod points_update_operation { /// Option for custom sharding to specify used shard keys #[prost(message, optional, tag = "2")] pub shard_key_selector: ::core::option::Option, - /// If specified, only points that match this filter will be updated, others will be inserted + /// Filter to apply when updating existing points. Only points matching this filter will be updated. + /// Points that don't match will keep their current state. New points will be inserted regardless of the filter. #[prost(message, optional, tag = "3")] pub update_filter: ::core::option::Option, + /// Mode of the upsert operation: insert_only, upsert (default), update_only + #[prost(enumeration = "super::UpdateMode", optional, tag = "4")] + pub update_mode: ::core::option::Option, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct SetPayload { @@ -5031,6 +5290,9 @@ pub struct UpdateBatchPoints { /// Write ordering guarantees #[prost(message, optional, tag = "4")] pub ordering: ::core::option::Option, + /// Timeout for the operation in seconds + #[prost(uint64, optional, tag = "5")] + pub timeout: ::core::option::Option, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct PointsOperationResponse { @@ -5099,7 +5361,7 @@ pub struct GroupId { pub mod group_id { #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Kind { - /// Represents a double value. + /// Represents an unsigned integer value. #[prost(uint64, tag = "1")] UnsignedValue(u64), /// Represents an integer value @@ -5409,9 +5671,11 @@ pub struct HardwareUsage { pub enum WriteOrderingType { /// Write operations may be reordered, works faster, default Weak = 0, - /// Write operations go through dynamically selected leader, may be inconsistent for a short period of time in case of leader change + /// Write operations go through dynamically selected leader, + /// may be inconsistent for a short period of time in case of leader change Medium = 1, - /// Write operations go through the permanent leader, consistent, but may be unavailable if leader is down + /// Write operations go through the permanent leader, consistent, + /// but may be unavailable if leader is down Strong = 2, } impl WriteOrderingType { @@ -5436,6 +5700,39 @@ impl WriteOrderingType { } } } +/// Defines the mode of the upsert operation +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum UpdateMode { + /// Default mode - insert new points, update existing points + Upsert = 0, + /// Only insert new points, do not update existing points + InsertOnly = 1, + /// Only update existing points, do not insert new points + UpdateOnly = 2, +} +impl UpdateMode { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + Self::Upsert => "Upsert", + Self::InsertOnly => "InsertOnly", + Self::UpdateOnly => "UpdateOnly", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "Upsert" => Some(Self::Upsert), + "InsertOnly" => Some(Self::InsertOnly), + "UpdateOnly" => Some(Self::UpdateOnly), + _ => None, + } + } +} #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum ReadConsistencyType { @@ -5641,6 +5938,8 @@ pub enum UpdateStatus { Completed = 2, /// Internal: update is rejected due to an outdated clock ClockRejected = 3, + /// Timeout of awaited operations + WaitTimeout = 4, } impl UpdateStatus { /// String value of the enum field names used in the ProtoBuf definition. @@ -5653,6 +5952,7 @@ impl UpdateStatus { Self::Acknowledged => "Acknowledged", Self::Completed => "Completed", Self::ClockRejected => "ClockRejected", + Self::WaitTimeout => "WaitTimeout", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -5662,6 +5962,7 @@ impl UpdateStatus { "Acknowledged" => Some(Self::Acknowledged), "Completed" => Some(Self::Completed), "ClockRejected" => Some(Self::ClockRejected), + "WaitTimeout" => Some(Self::WaitTimeout), _ => None, } } @@ -5757,8 +6058,8 @@ pub mod points_client { self.inner = self.inner.max_encoding_message_size(limit); self } - /// - /// Perform insert + updates on points. If a point with a given ID already exists - it will be overwritten. + /// Perform insert + updates on points. + /// If a point with a given ID already exists - it will be overwritten. pub async fn upsert( &mut self, request: impl tonic::IntoRequest, @@ -5780,7 +6081,6 @@ pub mod points_client { req.extensions_mut().insert(GrpcMethod::new("qdrant.Points", "Upsert")); self.inner.unary(req, path, codec).await } - /// /// Delete points pub async fn delete( &mut self, @@ -5803,7 +6103,6 @@ pub mod points_client { req.extensions_mut().insert(GrpcMethod::new("qdrant.Points", "Delete")); self.inner.unary(req, path, codec).await } - /// /// Retrieve points pub async fn get( &mut self, @@ -5823,7 +6122,6 @@ pub mod points_client { req.extensions_mut().insert(GrpcMethod::new("qdrant.Points", "Get")); self.inner.unary(req, path, codec).await } - /// /// Update named vectors for point pub async fn update_vectors( &mut self, @@ -5849,7 +6147,6 @@ pub mod points_client { .insert(GrpcMethod::new("qdrant.Points", "UpdateVectors")); self.inner.unary(req, path, codec).await } - /// /// Delete named vectors for points pub async fn delete_vectors( &mut self, @@ -5875,7 +6172,6 @@ pub mod points_client { .insert(GrpcMethod::new("qdrant.Points", "DeleteVectors")); self.inner.unary(req, path, codec).await } - /// /// Set payload for points pub async fn set_payload( &mut self, @@ -5898,7 +6194,6 @@ pub mod points_client { req.extensions_mut().insert(GrpcMethod::new("qdrant.Points", "SetPayload")); self.inner.unary(req, path, codec).await } - /// /// Overwrite payload for points pub async fn overwrite_payload( &mut self, @@ -5924,7 +6219,6 @@ pub mod points_client { .insert(GrpcMethod::new("qdrant.Points", "OverwritePayload")); self.inner.unary(req, path, codec).await } - /// /// Delete specified key payload for points pub async fn delete_payload( &mut self, @@ -5950,7 +6244,6 @@ pub mod points_client { .insert(GrpcMethod::new("qdrant.Points", "DeletePayload")); self.inner.unary(req, path, codec).await } - 
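// Illustrative sketch of reacting to the new `WaitTimeout` update status;
// assumes an `UpdateResult` (as carried inside `PointsOperationResponse`) is
// in scope and that callers treat the status as a soft failure.
fn wait_timeout_sketch(result: &UpdateResult) -> bool {
    // `status` is the raw prost enumeration value (i32).
    if result.status == UpdateStatus::WaitTimeout as i32 {
        // The update was accepted, but waiting for completion timed out; the
        // operation may still finish asynchronously, so poll or retry with a
        // longer timeout instead of treating this as a hard error.
        return true;
    }
    false
}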
/// /// Remove all payload for specified points pub async fn clear_payload( &mut self, @@ -5976,7 +6269,6 @@ pub mod points_client { .insert(GrpcMethod::new("qdrant.Points", "ClearPayload")); self.inner.unary(req, path, codec).await } - /// /// Create index for field in collection pub async fn create_field_index( &mut self, @@ -6002,7 +6294,6 @@ pub mod points_client { .insert(GrpcMethod::new("qdrant.Points", "CreateFieldIndex")); self.inner.unary(req, path, codec).await } - /// /// Delete field index for collection pub async fn delete_field_index( &mut self, @@ -6028,8 +6319,8 @@ pub mod points_client { .insert(GrpcMethod::new("qdrant.Points", "DeleteFieldIndex")); self.inner.unary(req, path, codec).await } - /// - /// Retrieve closest points based on vector similarity and given filtering conditions + /// Retrieve closest points based on vector similarity and given filtering + /// conditions pub async fn search( &mut self, request: impl tonic::IntoRequest, @@ -6048,8 +6339,8 @@ pub mod points_client { req.extensions_mut().insert(GrpcMethod::new("qdrant.Points", "Search")); self.inner.unary(req, path, codec).await } - /// - /// Retrieve closest points based on vector similarity and given filtering conditions + /// Retrieve closest points based on vector similarity and given filtering + /// conditions pub async fn search_batch( &mut self, request: impl tonic::IntoRequest, @@ -6073,8 +6364,8 @@ pub mod points_client { req.extensions_mut().insert(GrpcMethod::new("qdrant.Points", "SearchBatch")); self.inner.unary(req, path, codec).await } - /// - /// Retrieve closest points based on vector similarity and given filtering conditions, grouped by a given field + /// Retrieve closest points based on vector similarity and given filtering + /// conditions, grouped by a given field pub async fn search_groups( &mut self, request: impl tonic::IntoRequest, @@ -6099,7 +6390,6 @@ pub mod points_client { .insert(GrpcMethod::new("qdrant.Points", "SearchGroups")); self.inner.unary(req, path, codec).await } - /// /// Iterate over all or filtered points pub async fn scroll( &mut self, @@ -6119,8 +6409,8 @@ pub mod points_client { req.extensions_mut().insert(GrpcMethod::new("qdrant.Points", "Scroll")); self.inner.unary(req, path, codec).await } - /// - /// Look for the points which are closer to stored positive examples and at the same time further to negative examples. + /// Look for the points which are closer to stored positive examples and at + /// the same time further to negative examples. pub async fn recommend( &mut self, request: impl tonic::IntoRequest, @@ -6142,8 +6432,8 @@ pub mod points_client { req.extensions_mut().insert(GrpcMethod::new("qdrant.Points", "Recommend")); self.inner.unary(req, path, codec).await } - /// - /// Look for the points which are closer to stored positive examples and at the same time further to negative examples. + /// Look for the points which are closer to stored positive examples and at + /// the same time further to negative examples. 
pub async fn recommend_batch( &mut self, request: impl tonic::IntoRequest, @@ -6168,8 +6458,8 @@ pub mod points_client { .insert(GrpcMethod::new("qdrant.Points", "RecommendBatch")); self.inner.unary(req, path, codec).await } - /// - /// Look for the points which are closer to stored positive examples and at the same time further to negative examples, grouped by a given field + /// Look for the points which are closer to stored positive examples and at + /// the same time further to negative examples, grouped by a given field pub async fn recommend_groups( &mut self, request: impl tonic::IntoRequest, @@ -6194,22 +6484,25 @@ pub mod points_client { .insert(GrpcMethod::new("qdrant.Points", "RecommendGroups")); self.inner.unary(req, path, codec).await } + /// Use context and a target to find the most similar points to the target, + /// constrained by the context. /// - /// Use context and a target to find the most similar points to the target, constrained by the context. - /// - /// When using only the context (without a target), a special search - called context search - is performed where - /// pairs of points are used to generate a loss that guides the search towards the zone where - /// most positive examples overlap. This means that the score minimizes the scenario of - /// finding a point closer to a negative than to a positive part of a pair. + /// When using only the context (without a target), a special search - called + /// context search - is performed where pairs of points are used to generate a + /// loss that guides the search towards the zone where most positive examples + /// overlap. This means that the score minimizes the scenario of finding a + /// point closer to a negative than to a positive part of a pair. /// - /// Since the score of a context relates to loss, the maximum score a point can get is 0.0, - /// and it becomes normal that many points can have a score of 0.0. + /// Since the score of a context relates to loss, the maximum score a point + /// can get is 0.0, and it becomes normal that many points can have a score of + /// 0.0. /// - /// When using target (with or without context), the score behaves a little different: The - /// integer part of the score represents the rank with respect to the context, while the - /// decimal part of the score relates to the distance to the target. The context part of the score for - /// each pair is calculated +1 if the point is closer to a positive than to a negative part of a pair, - /// and -1 otherwise. + /// When using target (with or without context), the score behaves a little + /// different: The integer part of the score represents the rank with respect + /// to the context, while the decimal part of the score relates to the + /// distance to the target. The context part of the score for each pair is + /// calculated +1 if the point is closer to a positive than to a negative part + /// of a pair, and -1 otherwise. 
pub async fn discover( &mut self, request: impl tonic::IntoRequest, @@ -6231,7 +6524,6 @@ pub mod points_client { req.extensions_mut().insert(GrpcMethod::new("qdrant.Points", "Discover")); self.inner.unary(req, path, codec).await } - /// /// Batch request points based on { positive, negative } pairs of examples, and/or a target pub async fn discover_batch( &mut self, @@ -6257,7 +6549,6 @@ pub mod points_client { .insert(GrpcMethod::new("qdrant.Points", "DiscoverBatch")); self.inner.unary(req, path, codec).await } - /// /// Count points in collection with given filtering conditions pub async fn count( &mut self, @@ -6277,7 +6568,6 @@ pub mod points_client { req.extensions_mut().insert(GrpcMethod::new("qdrant.Points", "Count")); self.inner.unary(req, path, codec).await } - /// /// Perform multiple update operations in one request pub async fn update_batch( &mut self, @@ -6302,8 +6592,9 @@ pub mod points_client { req.extensions_mut().insert(GrpcMethod::new("qdrant.Points", "UpdateBatch")); self.inner.unary(req, path, codec).await } - /// - /// Universally query points. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries. + /// Universally query points. + /// This endpoint covers all capabilities of search, recommend, discover, filters. + /// But also enables hybrid and multi-stage queries. pub async fn query( &mut self, request: impl tonic::IntoRequest, @@ -6322,8 +6613,9 @@ pub mod points_client { req.extensions_mut().insert(GrpcMethod::new("qdrant.Points", "Query")); self.inner.unary(req, path, codec).await } - /// - /// Universally query points in a batch fashion. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries. + /// Universally query points in a batch fashion. + /// This endpoint covers all capabilities of search, recommend, discover, filters. + /// But also enables hybrid and multi-stage queries. pub async fn query_batch( &mut self, request: impl tonic::IntoRequest, @@ -6345,8 +6637,9 @@ pub mod points_client { req.extensions_mut().insert(GrpcMethod::new("qdrant.Points", "QueryBatch")); self.inner.unary(req, path, codec).await } - /// - /// Universally query points in a group fashion. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries. + /// Universally query points in a group fashion. + /// This endpoint covers all capabilities of search, recommend, discover, filters. + /// But also enables hybrid and multi-stage queries. pub async fn query_groups( &mut self, request: impl tonic::IntoRequest, @@ -6370,8 +6663,9 @@ pub mod points_client { req.extensions_mut().insert(GrpcMethod::new("qdrant.Points", "QueryGroups")); self.inner.unary(req, path, codec).await } - /// - /// Perform facet counts. For each value in the field, count the number of points that have this value and match the conditions. + /// Perform facet counts. + /// For each value in the field, count the number of points that have this + /// value and match the conditions. 
pub async fn facet( &mut self, request: impl tonic::IntoRequest, @@ -6390,7 +6684,6 @@ pub mod points_client { req.extensions_mut().insert(GrpcMethod::new("qdrant.Points", "Facet")); self.inner.unary(req, path, codec).await } - /// /// Compute distance matrix for sampled points with a pair based output format pub async fn search_matrix_pairs( &mut self, @@ -6416,7 +6709,6 @@ pub mod points_client { .insert(GrpcMethod::new("qdrant.Points", "SearchMatrixPairs")); self.inner.unary(req, path, codec).await } - /// /// Compute distance matrix for sampled points with an offset based output format pub async fn search_matrix_offsets( &mut self, @@ -6457,8 +6749,8 @@ pub mod points_server { /// Generated trait containing gRPC methods that should be implemented for use with PointsServer. #[async_trait] pub trait Points: std::marker::Send + std::marker::Sync + 'static { - /// - /// Perform insert + updates on points. If a point with a given ID already exists - it will be overwritten. + /// Perform insert + updates on points. + /// If a point with a given ID already exists - it will be overwritten. async fn upsert( &self, request: tonic::Request, @@ -6466,7 +6758,6 @@ pub mod points_server { tonic::Response, tonic::Status, >; - /// /// Delete points async fn delete( &self, @@ -6475,13 +6766,11 @@ pub mod points_server { tonic::Response, tonic::Status, >; - /// /// Retrieve points async fn get( &self, request: tonic::Request, ) -> std::result::Result, tonic::Status>; - /// /// Update named vectors for point async fn update_vectors( &self, @@ -6490,7 +6779,6 @@ pub mod points_server { tonic::Response, tonic::Status, >; - /// /// Delete named vectors for points async fn delete_vectors( &self, @@ -6499,7 +6787,6 @@ pub mod points_server { tonic::Response, tonic::Status, >; - /// /// Set payload for points async fn set_payload( &self, @@ -6508,7 +6795,6 @@ pub mod points_server { tonic::Response, tonic::Status, >; - /// /// Overwrite payload for points async fn overwrite_payload( &self, @@ -6517,7 +6803,6 @@ pub mod points_server { tonic::Response, tonic::Status, >; - /// /// Delete specified key payload for points async fn delete_payload( &self, @@ -6526,7 +6811,6 @@ pub mod points_server { tonic::Response, tonic::Status, >; - /// /// Remove all payload for specified points async fn clear_payload( &self, @@ -6535,7 +6819,6 @@ pub mod points_server { tonic::Response, tonic::Status, >; - /// /// Create index for field in collection async fn create_field_index( &self, @@ -6544,7 +6827,6 @@ pub mod points_server { tonic::Response, tonic::Status, >; - /// /// Delete field index for collection async fn delete_field_index( &self, @@ -6553,14 +6835,14 @@ pub mod points_server { tonic::Response, tonic::Status, >; - /// - /// Retrieve closest points based on vector similarity and given filtering conditions + /// Retrieve closest points based on vector similarity and given filtering + /// conditions async fn search( &self, request: tonic::Request, ) -> std::result::Result, tonic::Status>; - /// - /// Retrieve closest points based on vector similarity and given filtering conditions + /// Retrieve closest points based on vector similarity and given filtering + /// conditions async fn search_batch( &self, request: tonic::Request, @@ -6568,8 +6850,8 @@ pub mod points_server { tonic::Response, tonic::Status, >; - /// - /// Retrieve closest points based on vector similarity and given filtering conditions, grouped by a given field + /// Retrieve closest points based on vector similarity and given filtering + /// conditions, 
grouped by a given field async fn search_groups( &self, request: tonic::Request, @@ -6577,14 +6859,13 @@ pub mod points_server { tonic::Response, tonic::Status, >; - /// /// Iterate over all or filtered points async fn scroll( &self, request: tonic::Request, ) -> std::result::Result, tonic::Status>; - /// - /// Look for the points which are closer to stored positive examples and at the same time further to negative examples. + /// Look for the points which are closer to stored positive examples and at + /// the same time further to negative examples. async fn recommend( &self, request: tonic::Request, @@ -6592,8 +6873,8 @@ pub mod points_server { tonic::Response, tonic::Status, >; - /// - /// Look for the points which are closer to stored positive examples and at the same time further to negative examples. + /// Look for the points which are closer to stored positive examples and at + /// the same time further to negative examples. async fn recommend_batch( &self, request: tonic::Request, @@ -6601,8 +6882,8 @@ pub mod points_server { tonic::Response, tonic::Status, >; - /// - /// Look for the points which are closer to stored positive examples and at the same time further to negative examples, grouped by a given field + /// Look for the points which are closer to stored positive examples and at + /// the same time further to negative examples, grouped by a given field async fn recommend_groups( &self, request: tonic::Request, @@ -6610,22 +6891,25 @@ pub mod points_server { tonic::Response, tonic::Status, >; + /// Use context and a target to find the most similar points to the target, + /// constrained by the context. /// - /// Use context and a target to find the most similar points to the target, constrained by the context. - /// - /// When using only the context (without a target), a special search - called context search - is performed where - /// pairs of points are used to generate a loss that guides the search towards the zone where - /// most positive examples overlap. This means that the score minimizes the scenario of - /// finding a point closer to a negative than to a positive part of a pair. + /// When using only the context (without a target), a special search - called + /// context search - is performed where pairs of points are used to generate a + /// loss that guides the search towards the zone where most positive examples + /// overlap. This means that the score minimizes the scenario of finding a + /// point closer to a negative than to a positive part of a pair. /// - /// Since the score of a context relates to loss, the maximum score a point can get is 0.0, - /// and it becomes normal that many points can have a score of 0.0. + /// Since the score of a context relates to loss, the maximum score a point + /// can get is 0.0, and it becomes normal that many points can have a score of + /// 0.0. /// - /// When using target (with or without context), the score behaves a little different: The - /// integer part of the score represents the rank with respect to the context, while the - /// decimal part of the score relates to the distance to the target. The context part of the score for - /// each pair is calculated +1 if the point is closer to a positive than to a negative part of a pair, - /// and -1 otherwise. + /// When using target (with or without context), the score behaves a little + /// different: The integer part of the score represents the rank with respect + /// to the context, while the decimal part of the score relates to the + /// distance to the target. 
The context part of the score for each pair is + /// calculated +1 if the point is closer to a positive than to a negative part + /// of a pair, and -1 otherwise. async fn discover( &self, request: tonic::Request, @@ -6633,7 +6917,6 @@ pub mod points_server { tonic::Response, tonic::Status, >; - /// /// Batch request points based on { positive, negative } pairs of examples, and/or a target async fn discover_batch( &self, @@ -6642,13 +6925,11 @@ pub mod points_server { tonic::Response, tonic::Status, >; - /// /// Count points in collection with given filtering conditions async fn count( &self, request: tonic::Request, ) -> std::result::Result, tonic::Status>; - /// /// Perform multiple update operations in one request async fn update_batch( &self, @@ -6657,14 +6938,16 @@ pub mod points_server { tonic::Response, tonic::Status, >; - /// - /// Universally query points. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries. + /// Universally query points. + /// This endpoint covers all capabilities of search, recommend, discover, filters. + /// But also enables hybrid and multi-stage queries. async fn query( &self, request: tonic::Request, ) -> std::result::Result, tonic::Status>; - /// - /// Universally query points in a batch fashion. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries. + /// Universally query points in a batch fashion. + /// This endpoint covers all capabilities of search, recommend, discover, filters. + /// But also enables hybrid and multi-stage queries. async fn query_batch( &self, request: tonic::Request, @@ -6672,8 +6955,9 @@ pub mod points_server { tonic::Response, tonic::Status, >; - /// - /// Universally query points in a group fashion. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries. + /// Universally query points in a group fashion. + /// This endpoint covers all capabilities of search, recommend, discover, filters. + /// But also enables hybrid and multi-stage queries. async fn query_groups( &self, request: tonic::Request, @@ -6681,13 +6965,13 @@ pub mod points_server { tonic::Response, tonic::Status, >; - /// - /// Perform facet counts. For each value in the field, count the number of points that have this value and match the conditions. + /// Perform facet counts. + /// For each value in the field, count the number of points that have this + /// value and match the conditions. 
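// Illustrative sketch of a facet count request through the high-level client;
// the `FacetCountsBuilder` helper and the response shape are assumptions based
// on the crate's existing facet support, and the field name is a placeholder.
async fn facet_sketch(
    client: &qdrant_client::Qdrant,
) -> Result<(), qdrant_client::QdrantError> {
    use qdrant_client::qdrant::FacetCountsBuilder;
    let response = client
        .facet(
            // For each distinct value of the "category" payload field, count
            // how many points carry that value and match the conditions.
            FacetCountsBuilder::new("my_collection", "category").limit(10u64),
        )
        .await?;
    for hit in response.hits {
        println!("{:?} => {}", hit.value, hit.count);
    }
    Ok(())
}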
async fn facet( &self, request: tonic::Request, ) -> std::result::Result, tonic::Status>; - /// /// Compute distance matrix for sampled points with a pair based output format async fn search_matrix_pairs( &self, @@ -6696,7 +6980,6 @@ pub mod points_server { tonic::Response, tonic::Status, >; - /// /// Compute distance matrix for sampled points with an offset based output format async fn search_matrix_offsets( &self, @@ -8205,7 +8488,6 @@ pub mod snapshots_client { self.inner = self.inner.max_encoding_message_size(limit); self } - /// /// Create collection snapshot pub async fn create( &mut self, @@ -8228,7 +8510,6 @@ pub mod snapshots_client { req.extensions_mut().insert(GrpcMethod::new("qdrant.Snapshots", "Create")); self.inner.unary(req, path, codec).await } - /// /// List collection snapshots pub async fn list( &mut self, @@ -8251,7 +8532,6 @@ pub mod snapshots_client { req.extensions_mut().insert(GrpcMethod::new("qdrant.Snapshots", "List")); self.inner.unary(req, path, codec).await } - /// /// Delete collection snapshot pub async fn delete( &mut self, @@ -8274,7 +8554,6 @@ pub mod snapshots_client { req.extensions_mut().insert(GrpcMethod::new("qdrant.Snapshots", "Delete")); self.inner.unary(req, path, codec).await } - /// /// Create full storage snapshot pub async fn create_full( &mut self, @@ -8300,7 +8579,6 @@ pub mod snapshots_client { .insert(GrpcMethod::new("qdrant.Snapshots", "CreateFull")); self.inner.unary(req, path, codec).await } - /// /// List full storage snapshots pub async fn list_full( &mut self, @@ -8325,7 +8603,6 @@ pub mod snapshots_client { req.extensions_mut().insert(GrpcMethod::new("qdrant.Snapshots", "ListFull")); self.inner.unary(req, path, codec).await } - /// /// Delete full storage snapshot pub async fn delete_full( &mut self, @@ -8366,7 +8643,6 @@ pub mod snapshots_server { /// Generated trait containing gRPC methods that should be implemented for use with SnapshotsServer. 
#[async_trait] pub trait Snapshots: std::marker::Send + std::marker::Sync + 'static { - /// /// Create collection snapshot async fn create( &self, @@ -8375,7 +8651,6 @@ pub mod snapshots_server { tonic::Response, tonic::Status, >; - /// /// List collection snapshots async fn list( &self, @@ -8384,7 +8659,6 @@ pub mod snapshots_server { tonic::Response, tonic::Status, >; - /// /// Delete collection snapshot async fn delete( &self, @@ -8393,7 +8667,6 @@ pub mod snapshots_server { tonic::Response, tonic::Status, >; - /// /// Create full storage snapshot async fn create_full( &self, @@ -8402,7 +8675,6 @@ pub mod snapshots_server { tonic::Response, tonic::Status, >; - /// /// List full storage snapshots async fn list_full( &self, @@ -8411,7 +8683,6 @@ pub mod snapshots_server { tonic::Response, tonic::Status, >; - /// /// Delete full storage snapshot async fn delete_full( &self, diff --git a/src/qdrant_client/builders/query.rs b/src/qdrant_client/builders/query.rs index a09a2430..3235ec5a 100644 --- a/src/qdrant_client/builders/query.rs +++ b/src/qdrant_client/builders/query.rs @@ -2,7 +2,7 @@ use crate::qdrant::{ ContextInput, ContextInputBuilder, ContextInputPairBuilder, DiscoverInput, DiscoverInputBuilder, Formula, Mmr, NearestInputWithMmr, OrderBy, OrderByBuilder, PrefetchQuery, PrefetchQueryBuilder, Query, QueryPointGroupsBuilder, QueryPointsBuilder, - RecommendInput, RecommendInputBuilder, Rrf, VectorInput, + RecommendInput, RecommendInputBuilder, RelevanceFeedbackInput, Rrf, VectorInput, }; impl QueryPointsBuilder { @@ -98,6 +98,14 @@ impl Query { variant: Some(crate::qdrant::query::Variant::Sample(value.into())), } } + + pub fn new_relevance_feedback(value: impl Into) -> Self { + Self { + variant: Some(crate::qdrant::query::Variant::RelevanceFeedback( + value.into(), + )), + } + } } impl RecommendInputBuilder { diff --git a/src/qdrant_client/conversions/query.rs b/src/qdrant_client/conversions/query.rs index 669c0539..ae3b9e57 100644 --- a/src/qdrant_client/conversions/query.rs +++ b/src/qdrant_client/conversions/query.rs @@ -1,6 +1,6 @@ use crate::qdrant::{ query, ContextInput, DiscoverInput, Formula, FormulaBuilder, Fusion, OrderBy, OrderByBuilder, - PointId, Query, RecommendInput, VectorInput, + PointId, Query, RecommendInput, RelevanceFeedbackInput, VectorInput, }; impl From for Query { @@ -51,6 +51,14 @@ impl From for Query { } } +impl From for Query { + fn from(value: RelevanceFeedbackInput) -> Self { + Self { + variant: Some(query::Variant::RelevanceFeedback(value)), + } + } +} + impl From for Query { fn from(value: Formula) -> Self { Self { diff --git a/src/qdrant_client/sharding_keys.rs b/src/qdrant_client/sharding_keys.rs index 259ecc41..387880fc 100644 --- a/src/qdrant_client/sharding_keys.rs +++ b/src/qdrant_client/sharding_keys.rs @@ -1,5 +1,6 @@ use crate::qdrant::{ CreateShardKeyRequest, CreateShardKeyResponse, DeleteShardKeyRequest, DeleteShardKeyResponse, + ListShardKeysRequest, ListShardKeysResponse, }; use crate::qdrant_client::{Qdrant, QdrantResult}; @@ -44,6 +45,31 @@ impl Qdrant { .await } + /// List all shard keys in a collection. 
+ /// + /// ```no_run + ///# use qdrant_client::{Qdrant, QdrantError}; + ///# async fn list_shard_keys(client: &Qdrant) + ///# -> Result<(), QdrantError> { + /// let response = client.list_shard_keys("my_collection").await?; + ///# Ok(()) + ///# } + /// ``` + /// + /// Documentation: + pub async fn list_shard_keys( + &self, + request: impl Into, + ) -> QdrantResult { + let request = &request.into(); + + self.with_collections_client(|mut collection_api| async move { + let result = collection_api.list_shard_keys(request.clone()).await?; + Ok(result.into_inner()) + }) + .await + } + /// Delete existing shard key from a collection. /// /// Deleting a shard key destroys all shards and data placed in it. diff --git a/src/serde.rs b/src/serde_impl.rs similarity index 86% rename from src/serde.rs rename to src/serde_impl.rs index e5ee228f..b759e2de 100644 --- a/src/serde.rs +++ b/src/serde_impl.rs @@ -1,7 +1,4 @@ -#![allow(deprecated)] - use std::collections::HashMap; -use std::fmt::{Display, Formatter}; use serde::ser::{SerializeMap, SerializeSeq}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -9,24 +6,6 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer}; use crate::qdrant::value::Kind; use crate::qdrant::{ListValue, Struct, Value}; -#[derive(Debug)] -#[deprecated( - since = "1.10.0", - note = "use `qdrant_client::Error::JsonToPayload` error variant instead" -)] -#[allow(dead_code)] -pub struct PayloadConversionError(serde_json::Value); - -impl Display for PayloadConversionError { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!( - f, - "Failed to convert json {} to payload: expected object at the top level", - self.0 - ) - } -} - impl Serialize for Value { fn serialize(&self, serializer: S) -> Result where diff --git a/tests/snippet_tests/mod.rs b/tests/snippet_tests/mod.rs index 453928c4..decfd244 100644 --- a/tests/snippet_tests/mod.rs +++ b/tests/snippet_tests/mod.rs @@ -27,12 +27,14 @@ mod test_get_collections; mod test_get_collections_aliases; mod test_get_points; mod test_list_full_snapshots; +mod test_list_shard_keys; mod test_list_snapshots; mod test_overwrite_payload; mod test_query_document; mod test_query_image; mod test_query_points; mod test_query_points_groups; +mod test_query_points_relevance_feedback; mod test_recommend_batch_points; mod test_recommend_point_groups; mod test_recommend_points; @@ -53,4 +55,5 @@ mod test_upsert_document; mod test_upsert_image; mod test_upsert_points; mod test_upsert_points_fallback_shard_key; +mod test_upsert_points_insert_only; mod test_upsert_points_with_condition; \ No newline at end of file diff --git a/tests/snippet_tests/test_list_shard_keys.rs b/tests/snippet_tests/test_list_shard_keys.rs new file mode 100644 index 00000000..cb36853c --- /dev/null +++ b/tests/snippet_tests/test_list_shard_keys.rs @@ -0,0 +1,15 @@ + +#[tokio::test] +async fn test_list_shard_keys() { + async fn list_shard_keys() -> Result<(), Box> { + // WARNING: This is a generated test snippet. 
diff --git a/tests/snippet_tests/mod.rs b/tests/snippet_tests/mod.rs
index 453928c4..decfd244 100644
--- a/tests/snippet_tests/mod.rs
+++ b/tests/snippet_tests/mod.rs
@@ -27,12 +27,14 @@ mod test_get_collections;
 mod test_get_collections_aliases;
 mod test_get_points;
 mod test_list_full_snapshots;
+mod test_list_shard_keys;
 mod test_list_snapshots;
 mod test_overwrite_payload;
 mod test_query_document;
 mod test_query_image;
 mod test_query_points;
 mod test_query_points_groups;
+mod test_query_points_relevance_feedback;
 mod test_recommend_batch_points;
 mod test_recommend_point_groups;
 mod test_recommend_points;
@@ -53,4 +55,5 @@ mod test_upsert_document;
 mod test_upsert_image;
 mod test_upsert_points;
 mod test_upsert_points_fallback_shard_key;
+mod test_upsert_points_insert_only;
 mod test_upsert_points_with_condition;
\ No newline at end of file
diff --git a/tests/snippet_tests/test_list_shard_keys.rs b/tests/snippet_tests/test_list_shard_keys.rs
new file mode 100644
index 00000000..cb36853c
--- /dev/null
+++ b/tests/snippet_tests/test_list_shard_keys.rs
@@ -0,0 +1,15 @@
+
+#[tokio::test]
+async fn test_list_shard_keys() {
+    async fn list_shard_keys() -> Result<(), Box<dyn std::error::Error>> {
+        // WARNING: This is a generated test snippet.
+        // Please, modify the snippet in the `../snippets/list_shard_keys.rs` file
+        use qdrant_client::Qdrant;
+
+        let client = Qdrant::from_url("http://localhost:6334").build()?;
+
+        let _response = client.list_shard_keys("{collection_name}").await?;
+        Ok(())
+    }
+    let _ = list_shard_keys().await;
+}
diff --git a/tests/snippet_tests/test_query_points.rs b/tests/snippet_tests/test_query_points.rs
index 43e3bbdd..3edaae80 100644
--- a/tests/snippet_tests/test_query_points.rs
+++ b/tests/snippet_tests/test_query_points.rs
@@ -5,10 +5,11 @@ async fn test_query_points() {
         // WARNING: This is a generated test snippet.
         // Please, modify the snippet in the `../snippets/query_points.rs` file
         use qdrant_client::qdrant::{
-            AcornSearchParamsBuilder, Condition, DecayParamsExpressionBuilder, Expression, Filter,
-            FormulaBuilder, Fusion, GeoPoint, PointId, PrefetchQueryBuilder, Query, QueryPointsBuilder,
-            RecommendInputBuilder, RrfBuilder, Sample, SearchParamsBuilder, ShardKey,
-            ShardKeySelectorBuilder,
+            AcornSearchParamsBuilder, Condition, DecayParamsExpressionBuilder, Expression,
+            Filter, FormulaBuilder, Fusion, GeoPoint,
+            PointId, PrefetchQueryBuilder, Query, QueryPointsBuilder,
+            RecommendInputBuilder, RrfBuilder, Sample,
+            SearchParamsBuilder, ShardKey, ShardKeySelectorBuilder,
         };
         use qdrant_client::Qdrant;
 
diff --git a/tests/snippet_tests/test_query_points_relevance_feedback.rs b/tests/snippet_tests/test_query_points_relevance_feedback.rs
new file mode 100644
index 00000000..64f5bcf8
--- /dev/null
+++ b/tests/snippet_tests/test_query_points_relevance_feedback.rs
@@ -0,0 +1,29 @@
+
+#[tokio::test]
+async fn test_query_points_relevance_feedback() {
+    async fn query_points_relevance_feedback() -> Result<(), Box<dyn std::error::Error>> {
+        // WARNING: This is a generated test snippet.
+        // Please, modify the snippet in the `../snippets/query_points_relevance_feedback.rs` file
+        use qdrant_client::qdrant::{
+            FeedbackItemBuilder, FeedbackStrategyBuilder, PointId, Query, QueryPointsBuilder,
+            RelevanceFeedbackInputBuilder, VectorInput,
+        };
+        use qdrant_client::Qdrant;
+
+        let client = Qdrant::from_url("http://localhost:6334").build()?;
+
+        // Relevance feedback query (as of 1.17.0)
+        let _feedback = client.query(
+            QueryPointsBuilder::new("{collection_name}")
+                .query(Query::new_relevance_feedback(
+                    RelevanceFeedbackInputBuilder::new(vec![0.01, 0.45, 0.67])
+                        .add_feedback(FeedbackItemBuilder::new(VectorInput::new_id(PointId::from(42)), 0.9))
+                        .add_feedback(FeedbackItemBuilder::new(VectorInput::new_id(PointId::from(7)), 0.1))
+                        .strategy(FeedbackStrategyBuilder::naive(1.0, 1.0, 1.0))
+                ))
+                .limit(10u64)
+        ).await?;
+        Ok(())
+    }
+    let _ = query_points_relevance_feedback().await;
+}
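For context, a hypothetical two-pass flow the new query variant enables: run a plain nearest query, then feed the top hit's ID back as a positively weighted example. This sketch uses only names visible in this diff plus the pre-existing `Query::new_nearest`; the `0.9` weight and the `naive(1.0, 1.0, 1.0)` strategy parameters are copied from the snippet above, not tuned values:

```rust
use qdrant_client::qdrant::{
    FeedbackItemBuilder, FeedbackStrategyBuilder, Query, QueryPointsBuilder,
    RelevanceFeedbackInputBuilder, VectorInput,
};
use qdrant_client::{Qdrant, QdrantError};

// Hypothetical two-pass refinement loop.
async fn refine(client: &Qdrant) -> Result<(), QdrantError> {
    // First pass: plain nearest-neighbor query.
    let first = client
        .query(
            QueryPointsBuilder::new("{collection_name}")
                .query(Query::new_nearest(vec![0.01, 0.45, 0.67]))
                .limit(3u64),
        )
        .await?;

    // Start from the same query vector and attach feedback for the best hit.
    let mut input = RelevanceFeedbackInputBuilder::new(vec![0.01, 0.45, 0.67]);
    if let Some(id) = first.result.first().and_then(|hit| hit.id.clone()) {
        input = input.add_feedback(FeedbackItemBuilder::new(VectorInput::new_id(id), 0.9));
    }

    // Second pass: same collection, now with relevance feedback applied.
    let _refined = client
        .query(
            QueryPointsBuilder::new("{collection_name}")
                .query(Query::new_relevance_feedback(
                    input.strategy(FeedbackStrategyBuilder::naive(1.0, 1.0, 1.0)),
                ))
                .limit(10u64),
        )
        .await?;
    Ok(())
}
```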
diff --git a/tests/snippet_tests/test_upsert_points_insert_only.rs b/tests/snippet_tests/test_upsert_points_insert_only.rs
new file mode 100644
index 00000000..9e5bb799
--- /dev/null
+++ b/tests/snippet_tests/test_upsert_points_insert_only.rs
@@ -0,0 +1,34 @@
+
+#[tokio::test]
+async fn test_upsert_points_insert_only() {
+    async fn upsert_points_insert_only() -> Result<(), Box<dyn std::error::Error>> {
+        // WARNING: This is a generated test snippet.
+        // Please, modify the snippet in the `../snippets/upsert_points_insert_only.rs` file
+        use qdrant_client::qdrant::{PointStruct, UpdateMode, UpsertPointsBuilder};
+        use qdrant_client::{Payload, Qdrant};
+        use serde_json::json;
+
+        let client = Qdrant::from_url("http://localhost:6334").build()?;
+
+        let points = vec![PointStruct::new(
+            1,
+            vec![0.05, 0.61, 0.76, 0.74],
+            Payload::try_from(json!({
+                "city": "Berlin",
+                "price": 1.99,
+            }))
+            .unwrap(),
+        )];
+
+        // Only insert new points, do not update existing ones
+        client
+            .upsert_points(
+                UpsertPointsBuilder::new("{collection_name}", points)
+                    .wait(true)
+                    .update_mode(UpdateMode::InsertOnly),
+            )
+            .await?;
+        Ok(())
+    }
+    let _ = upsert_points_insert_only().await;
+}
diff --git a/tests/snippets/list_shard_keys.rs b/tests/snippets/list_shard_keys.rs
new file mode 100644
index 00000000..2e0dd0e6
--- /dev/null
+++ b/tests/snippets/list_shard_keys.rs
@@ -0,0 +1,5 @@
+use qdrant_client::Qdrant;
+
+let client = Qdrant::from_url("http://localhost:6334").build()?;
+
+let _response = client.list_shard_keys("{collection_name}").await?;
diff --git a/tests/snippets/query_points.rs b/tests/snippets/query_points.rs
index dc36f324..307a46fc 100644
--- a/tests/snippets/query_points.rs
+++ b/tests/snippets/query_points.rs
@@ -1,8 +1,9 @@
 use qdrant_client::qdrant::{
-    AcornSearchParamsBuilder, Condition, DecayParamsExpressionBuilder, Expression, Filter,
-    FormulaBuilder, Fusion, GeoPoint, PointId, PrefetchQueryBuilder, Query, QueryPointsBuilder,
-    RecommendInputBuilder, RrfBuilder, Sample, SearchParamsBuilder, ShardKey,
-    ShardKeySelectorBuilder,
+    AcornSearchParamsBuilder, Condition, DecayParamsExpressionBuilder, Expression,
+    Filter, FormulaBuilder, Fusion, GeoPoint,
+    PointId, PrefetchQueryBuilder, Query, QueryPointsBuilder,
+    RecommendInputBuilder, RrfBuilder, Sample,
+    SearchParamsBuilder, ShardKey, ShardKeySelectorBuilder,
 };
 use qdrant_client::Qdrant;
 
diff --git a/tests/snippets/query_points_relevance_feedback.rs b/tests/snippets/query_points_relevance_feedback.rs
new file mode 100644
index 00000000..f4433c50
--- /dev/null
+++ b/tests/snippets/query_points_relevance_feedback.rs
@@ -0,0 +1,19 @@
+use qdrant_client::qdrant::{
+    FeedbackItemBuilder, FeedbackStrategyBuilder, PointId, Query, QueryPointsBuilder,
+    RelevanceFeedbackInputBuilder, VectorInput,
+};
+use qdrant_client::Qdrant;
+
+let client = Qdrant::from_url("http://localhost:6334").build()?;
+
+// Relevance feedback query (as of 1.17.0)
+let _feedback = client.query(
+    QueryPointsBuilder::new("{collection_name}")
+        .query(Query::new_relevance_feedback(
+            RelevanceFeedbackInputBuilder::new(vec![0.01, 0.45, 0.67])
+                .add_feedback(FeedbackItemBuilder::new(VectorInput::new_id(PointId::from(42)), 0.9))
+                .add_feedback(FeedbackItemBuilder::new(VectorInput::new_id(PointId::from(7)), 0.1))
+                .strategy(FeedbackStrategyBuilder::naive(1.0, 1.0, 1.0))
+        ))
+        .limit(10u64)
+).await?;
diff --git a/tests/snippets/upsert_points_insert_only.rs b/tests/snippets/upsert_points_insert_only.rs
new file mode 100644
index 00000000..d844971c
--- /dev/null
+++ b/tests/snippets/upsert_points_insert_only.rs
@@ -0,0 +1,24 @@
+use qdrant_client::qdrant::{PointStruct, UpdateMode, UpsertPointsBuilder};
+use qdrant_client::{Payload, Qdrant};
+use serde_json::json;
+
+let client = Qdrant::from_url("http://localhost:6334").build()?;
+
+let points = vec![PointStruct::new(
+    1,
+    vec![0.05, 0.61, 0.76, 0.74],
+    Payload::try_from(json!({
+        "city": "Berlin",
+        "price": 1.99,
+    }))
+    .unwrap(),
+)];
+
+// Only insert new points, do not update existing ones
+client
+    .upsert_points(
+        UpsertPointsBuilder::new("{collection_name}", points)
+            .wait(true)
+            .update_mode(UpdateMode::InsertOnly),
+    )
+    .await?;
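And a sketch of what `UpdateMode::InsertOnly` implies at the call site: repeating an insert-only upsert for an ID that already exists should leave the stored point untouched. The expected outcome is inferred from the mode's name, not verified against server documentation:

```rust
use qdrant_client::qdrant::{PointStruct, UpdateMode, UpsertPointsBuilder};
use qdrant_client::{Payload, Qdrant, QdrantError};
use serde_json::json;

// Run the same insert-only upsert twice: the second call finds point 1
// already present, so it should keep the original payload in place.
async fn insert_twice(client: &Qdrant) -> Result<(), QdrantError> {
    for price in [1.99, 2.99] {
        let points = vec![PointStruct::new(
            1,
            vec![0.05, 0.61, 0.76, 0.74],
            Payload::try_from(json!({ "price": price })).unwrap(),
        )];
        client
            .upsert_points(
                UpsertPointsBuilder::new("{collection_name}", points)
                    .wait(true)
                    .update_mode(UpdateMode::InsertOnly),
            )
            .await?;
    }
    // If the inference holds, point 1 still carries {"price": 1.99} here.
    Ok(())
}
```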
diff --git a/tools/sync_proto.sh b/tools/sync_proto.sh
index c48f8bf1..b579ddfc 100755
--- a/tools/sync_proto.sh
+++ b/tools/sync_proto.sh
@@ -27,6 +27,7 @@ rm $CLIENT_DIR/qdrant_internal_service.proto
 rm $CLIENT_DIR/raft_service.proto
 rm $CLIENT_DIR/shard_snapshots_service.proto
 rm $CLIENT_DIR/health_check.proto
+rm $CLIENT_DIR/telemetry_internal.proto
 
 cat $CLIENT_DIR/qdrant.proto \
     | grep -v 'collections_internal_service.proto' \
@@ -35,6 +36,7 @@ cat $CLIENT_DIR/qdrant.proto \
     | grep -v 'raft_service.proto' \
     | grep -v 'shard_snapshots_service.proto' \
     | grep -v 'health_check.proto' \
+    | grep -v 'telemetry_internal.proto' \
     > $CLIENT_DIR/qdrant_tmp.proto

 mv $CLIENT_DIR/qdrant_tmp.proto $CLIENT_DIR/qdrant.proto