C++
This section presents examples that use the SmartRedis C++ API to
interact with the RedisAI tensor, model, and script data types.
It also demonstrates how to use the SmartRedis DataSet API.
Note
The C++ API examples rely on the SSDB environment variable being set
to the address and port of the Redis database.
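For example, one might verify that the variable is visible to a program with a minimal C++ sketch like the following (not part of the SmartRedis examples; shown only for illustration, and the address "127.0.0.1:6379" is just a placeholder):

#include <cstdlib>
#include <iostream>

int main() {
    // SSDB holds the database address and port, e.g. "127.0.0.1:6379"
    const char* ssdb = std::getenv("SSDB");
    if (!ssdb) {
        std::cerr << "SSDB is not set" << std::endl;
        return 1;
    }
    std::cout << "SSDB = " << ssdb << std::endl;
    return 0;
}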
Note
The C++ API examples are written to connect to a clustered database
or clustered SmartSim Orchestrator. Update the Client constructor
cluster flag to false to connect to a single-shard (single compute
host) database.
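For illustration, the two constructor calls would look as follows (a minimal sketch based only on the boolean cluster flag used in the examples below):

// Connect to a clustered database (or clustered SmartSim Orchestrator)
SmartRedis::Client clustered_client(true);

// Connect to a single-shard (non-clustered) database
SmartRedis::Client single_shard_client(false);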
Tensors
The following example shows how to send and receive a tensor using the SmartRedis C++ client API.
/*
 * BSD 2-Clause License
 *
 * Copyright (c) 2021-2022, Hewlett Packard Enterprise
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "client.h"
#include <vector>
#include <string>
#include <iostream>
#include <cstdlib>

int main(int argc, char* argv[]) {

    // Initialize tensor dimensions
    size_t dim1 = 3;
    size_t dim2 = 2;
    size_t dim3 = 5;
    std::vector<size_t> dims = {dim1, dim2, dim3};

    // Initialize a tensor to random values. Note that a dynamically
    // allocated tensor via malloc is also useable with the client
    // API. The std::vector is used here for brevity.
    size_t n_values = dim1 * dim2 * dim3;
    std::vector<double> input_tensor(n_values, 0);
    for(size_t i=0; i<n_values; i++)
        input_tensor[i] = 2.0*rand()/RAND_MAX - 1.0;

    // Initialize a SmartRedis client
    bool cluster_mode = true; // Set to false if not using a clustered database
    SmartRedis::Client client(cluster_mode);

    // Put the tensor in the database
    std::string key = "3d_tensor";
    client.put_tensor(key, input_tensor.data(), dims,
                      SRTensorTypeDouble, SRMemLayoutContiguous);

    // Retrieve the tensor from the database using the unpack feature.
    std::vector<double> unpack_tensor(n_values, 0);
    client.unpack_tensor(key, unpack_tensor.data(), {n_values},
                         SRTensorTypeDouble, SRMemLayoutContiguous);

    // Print the values retrieved with the unpack feature
    std::cout<<"Comparison of the sent and "\
               "retrieved (via unpack) values: "<<std::endl;
    for(size_t i=0; i<n_values; i++)
        std::cout<<"Sent: "<<input_tensor[i]<<" "
                 <<"Received: "<<unpack_tensor[i]<<std::endl;

    // Retrieve the tensor from the database using the get feature.
    SRTensorType get_type;
    std::vector<size_t> get_dims;
    void* get_tensor;
    client.get_tensor(key, get_tensor, get_dims, get_type, SRMemLayoutNested);

    // Print the values retrieved with the get feature
    std::cout<<"Comparison of the sent and "\
               "retrieved (via get) values: "<<std::endl;
    for(size_t i=0, c=0; i<dims[0]; i++)
        for(size_t j=0; j<dims[1]; j++)
            for(size_t k=0; k<dims[2]; k++, c++) {
                std::cout<<"Sent: "<<input_tensor[c]<<" "
                         <<"Received: "
                         <<((double***)get_tensor)[i][j][k]<<std::endl;
            }

    return 0;
}
DataSets
The C++ client can store and retrieve tensors and metadata in datasets. For further information about datasets, please refer to the Dataset section of the Data Structures documentation page.
The code below shows how to store and retrieve tensors and metadata
that belong to a DataSet.
/*
 * BSD 2-Clause License
 *
 * Copyright (c) 2021-2022, Hewlett Packard Enterprise
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "client.h"
#include <vector>
#include <string>
#include <iostream>
#include <cstdlib>

int main(int argc, char* argv[]) {

    // Initialize tensor dimensions
    size_t dim1 = 3;
    size_t dim2 = 2;
    size_t dim3 = 5;
    size_t n_values = dim1 * dim2 * dim3;
    std::vector<size_t> dims = {dim1, dim2, dim3};

    // Initialize two tensors to random values
    std::vector<double> tensor_1(n_values, 0);
    std::vector<int64_t> tensor_2(n_values, 0);

    for(size_t i=0; i<n_values; i++) {
        tensor_1[i] = 2.0*rand()/RAND_MAX - 1.0;
        tensor_2[i] = rand();
    }

    // Initialize three metadata values we will add
    // to the DataSet
    uint32_t meta_scalar_1 = 1;
    uint32_t meta_scalar_2 = 2;
    int64_t meta_scalar_3 = 3;

    // Initialize a SmartRedis client
    bool cluster_mode = true; // Set to false if not using a clustered database
    SmartRedis::Client client(cluster_mode);

    // Create a DataSet
    SmartRedis::DataSet dataset("example_dataset");

    // Add tensors to the DataSet
    dataset.add_tensor("tensor_1", tensor_1.data(), dims,
                       SRTensorTypeDouble, SRMemLayoutContiguous);

    dataset.add_tensor("tensor_2", tensor_2.data(), dims,
                       SRTensorTypeInt64, SRMemLayoutContiguous);

    // Add metadata scalar values to the DataSet
    dataset.add_meta_scalar("meta_field_1", &meta_scalar_1, SRMetadataTypeUint32);
    dataset.add_meta_scalar("meta_field_1", &meta_scalar_2, SRMetadataTypeUint32);
    dataset.add_meta_scalar("meta_field_2", &meta_scalar_3, SRMetadataTypeInt64);

    // Put the DataSet in the database
    client.put_dataset(dataset);

    // Retrieve the DataSet from the database
    SmartRedis::DataSet retrieved_dataset =
        client.get_dataset("example_dataset");

    // Retrieve one of the tensors
    std::vector<int64_t> unpack_dataset_tensor(n_values, 0);
    retrieved_dataset.unpack_tensor("tensor_2",
                                    unpack_dataset_tensor.data(),
                                    {n_values},
                                    SRTensorTypeInt64,
                                    SRMemLayoutContiguous);

    // Print out the retrieved values
    std::cout<<"Comparing sent and received "\
               "values for tensor_2: "<<std::endl;

    for(size_t i=0; i<n_values; i++)
        std::cout<<"Sent: "<<tensor_2[i]<<" "
                 <<"Received: "
                 <<unpack_dataset_tensor[i]<<std::endl;

    // Retrieve a metadata field
    size_t get_n_meta_values;
    void* get_meta_values;
    SRMetaDataType get_type;
    dataset.get_meta_scalars("meta_field_1",
                             get_meta_values,
                             get_n_meta_values,
                             get_type);

    // Print out the metadata field values
    for(size_t i=0; i<get_n_meta_values; i++)
        std::cout<<"meta_field_1 value "<<i<<" = "
                 <<((uint32_t*)get_meta_values)[i]<<std::endl;

    return 0;
}
Models
The following example shows how to store and use a DL model in the
database with the C++ Client. The model is stored as a file in the
../../../common/mnist_data/ path relative to the compiled executable.
Note that this example also sets and executes a preprocessing script.
/*
 * BSD 2-Clause License
 *
 * Copyright (c) 2021-2022, Hewlett Packard Enterprise
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "client.h"
#include <vector>
#include <string>
#include <fstream>
#include <sstream>
#include <iostream>
#include <cstring>

int main(int argc, char* argv[]) {

    // Initialize a vector that will hold the input image tensor
    size_t n_values = 1*1*28*28;
    std::vector<float> img(n_values, 0);

    // Load the mnist image from a file
    std::string image_file = "../../../common/mnist_data/one.raw";
    std::ifstream fin(image_file, std::ios::binary);
    std::ostringstream ostream;
    ostream << fin.rdbuf();
    fin.close();

    const std::string tmp = ostream.str();
    std::memcpy(img.data(), tmp.data(), img.size()*sizeof(float));

    // Initialize a SmartRedis client to connect to the Redis database
    bool cluster_mode = true; // Set to false if not using a clustered database
    SmartRedis::Client client(cluster_mode);

    // Use the client to set a model in the database from a file
    std::string model_key = "mnist_model";
    std::string model_file = "../../../common/mnist_data/mnist_cnn.pt";
    client.set_model_from_file(model_key, model_file, "TORCH", "CPU", 20);

    // Use the client to set a script in the database from a file
    std::string script_key = "mnist_script";
    std::string script_file = "../../../common/mnist_data/data_processing_script.txt";
    client.set_script_from_file(script_key, "CPU", script_file);

    // Declare keys that we will use in forthcoming client commands
    std::string in_key = "mnist_input";
    std::string script_out_key = "mnist_processed_input";
    std::string out_key = "mnist_output";

    // Put the tensor that was loaded from file into the database
    client.put_tensor(in_key, img.data(), {1,1,28,28},
                      SRTensorTypeFloat, SRMemLayoutContiguous);

    // Run the preprocessing script on the input tensor
    client.run_script("mnist_script", "pre_process", {in_key}, {script_out_key});

    // Run the model using the output of the preprocessing script
    client.run_model("mnist_model", {script_out_key}, {out_key});

    // Retrieve the output of the model
    std::vector<float> result(10, 0);
    client.unpack_tensor(out_key, result.data(), {10},
                         SRTensorTypeFloat, SRMemLayoutContiguous);

    // Print out the results of the model evaluation
    for(size_t i=0; i<result.size(); i++) {
        std::cout<<"Result["<<i<<"] = "<<result[i]<<std::endl;
    }

    return 0;
}
Scripts
The example in Models shows how to store and use a PyTorch script in
the database with the C++ Client. The script is stored as a file in
the ../../../common/mnist_data/ path relative to the compiled
executable. Note that the same example also sets and executes a
PyTorch model.
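For quick reference, the script-specific calls from that example are collected below (a condensed sketch that assumes a connected client and an input tensor already stored under in_key, as in the full Models listing above):

// Set the TorchScript file in the database to run on CPU
std::string script_key = "mnist_script";
std::string script_file = "../../../common/mnist_data/data_processing_script.txt";
client.set_script_from_file(script_key, "CPU", script_file);

// Execute the script's pre_process function on the stored input tensor
client.run_script(script_key, "pre_process", {in_key}, {script_out_key});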
Parallel (MPI) execution
In this example, the code shown in Models and Scripts is adapted to run in parallel using MPI. The functionality is the same; however, this version shows how keys can be prefixed with the MPI rank to prevent key collisions across ranks. Note that only one model and one script are set, and they are shared across all ranks.
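The key-prefixing pattern, excerpted from the program below, looks like this:

// Prefix keys with the MPI rank so each rank writes and reads its own
// tensors without colliding with other ranks
int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
std::string in_key = "mnist_input_rank_" + std::to_string(rank);
std::string out_key = "mnist_output_rank_" + std::to_string(rank);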
For completeness, the pre-processing script source code is also shown.
C++ program
/*
 * BSD 2-Clause License
 *
 * Copyright (c) 2021-2022, Hewlett Packard Enterprise
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "client.h"
#include <mpi.h>
#include <vector>
#include <string>
#include <fstream>
#include <sstream>
#include <iostream>
#include <cstring>

void run_mnist(const std::string& model_name,
               const std::string& script_name,
               SmartRedis::Client& client)
{
    // Get the MPI rank
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Initialize a vector that will hold the input image tensor
    size_t n_values = 1*1*28*28;
    std::vector<float> img(n_values, 0);

    // Load the mnist image from a file using MPI rank 0
    if (rank == 0) {
        std::string image_file = "../../../common/mnist_data/one.raw";
        std::ifstream fin(image_file, std::ios::binary);
        std::ostringstream ostream;
        ostream << fin.rdbuf();
        fin.close();

        const std::string tmp = ostream.str();
        std::memcpy(img.data(), tmp.data(), img.size()*sizeof(float));
    }

    // Broadcast the image to all MPI ranks. This is more efficient
    // than all ranks loading the same file. This is specific
    // to this example.
    MPI_Bcast(img.data(), 28*28, MPI_FLOAT, 0, MPI_COMM_WORLD);
    MPI_Barrier(MPI_COMM_WORLD);

    if(rank==0)
        std::cout<<"All ranks have MNIST image"<<std::endl;

    // Declare keys that we will use in forthcoming client commands
    std::string in_key = "mnist_input_rank_" + std::to_string(rank);
    std::string script_out_key = "mnist_processed_input_rank_" +
                                 std::to_string(rank);
    std::string out_key = "mnist_output_rank_" + std::to_string(rank);

    // Put the image tensor on the database
    client.put_tensor(in_key, img.data(), {1,1,28,28},
                      SRTensorTypeFloat, SRMemLayoutContiguous);

    // Run the preprocessing script
    client.run_script(script_name, "pre_process",
                      {in_key}, {script_out_key});

    // Run the model
    client.run_model(model_name, {script_out_key}, {out_key});

    // Get the result of the model
    std::vector<float> result(1*10);
    client.unpack_tensor(out_key, result.data(), {10},
                         SRTensorTypeFloat, SRMemLayoutContiguous);

    // Print out the results of the model for Rank 0
    if (rank == 0)
        for(size_t i=0; i<result.size(); i++)
            std::cout<<"Rank 0: Result["<<i<<"] = "<<result[i]<<std::endl;

    return;
}

int main(int argc, char* argv[]) {

    // Initialize the MPI comm world
    MPI_Init(&argc, &argv);

    // Retrieve the MPI rank
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Initialize a Client object
    bool cluster_mode = true; // Set to false if not using a clustered database
    SmartRedis::Client client(cluster_mode);

    // Set the model and script that will be used by all ranks
    // from MPI rank 0.
    if (rank == 0) {
        // Build model key, file name, and then set model
        // from file using client API
        std::string model_key = "mnist_model";
        std::string model_file = "../../../"
                                 "common/mnist_data/mnist_cnn.pt";
        client.set_model_from_file(model_key, model_file,
                                   "TORCH", "CPU", 20);

        // Build script key, file name, and then set script
        // from file using client API
        std::string script_key = "mnist_script";
        std::string script_file = "../../../common/mnist_data/"
                                  "data_processing_script.txt";
        client.set_script_from_file(script_key, "CPU", script_file);

        // Get model and script to illustrate client API
        // functionality, but this is not necessary for this example.
        std::string_view model = client.get_model(model_key);
        std::string_view script = client.get_script(script_key);
    }

    // Run the MNIST model
    MPI_Barrier(MPI_COMM_WORLD);
    run_mnist("mnist_model", "mnist_script", client);

    if (rank == 0)
        std::cout<<"Finished SmartRedis MNIST example."<<std::endl;

    // Finalize MPI Comm World
    MPI_Finalize();

    return 0;
}
Python Pre-Processing
# TorchScript pre-processing function executed inside the database;
# the torch module is provided by the RedisAI runtime, so no import
# is needed in this script file.
def pre_process(inp):
    mean = torch.zeros(1).float().to(inp.device)
    mean[0] = 2.0
    temp = inp.float() * mean
    return temp