CppConsoleDesktop.cpp

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See LICENSE.md in the repo root for license information.
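//
// Console sample: loads a SqueezeNet ONNX image-classification model with the
// ONNX Runtime C++ API and Windows ML execution providers, runs it on an input
// image, and prints the resulting class scores. The model is resolved from a
// local model catalog, the executable folder, or a user-supplied path.
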
#include <iostream>
#include <string>
#include <cstring> // strlen, used when widening exception messages for wcout
#include <vector>
#include <filesystem>

#include <winrt/base.h>
#include <winrt/Windows.Foundation.h>
#include <winrt/Windows.Foundation.Collections.h>
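
// WindowsMLShared.h is expected to declare or include the shared sample helpers used
// below (CommandLineOptions, ExecutionProviderManager, ModelManager, InferenceEngine,
// ImageProcessor, ResultProcessor) and to pull in the ONNX Runtime and
// Microsoft.Windows.AI.MachineLearning headers.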
#include "WindowsMLShared.h"

using namespace winrt::Windows::Foundation;
using namespace WindowsML::Shared;

IAsyncAction RunInferenceAsync(const CommandLineOptions& options)
{
    try
    {
        // Discover and register the available execution providers (optionally downloading EP packages)
        ExecutionProviderManager epManager;
        epManager.InitializeProviders(options.download_packages);

        // Create ONNX environment and session options
        auto env = Ort::Env();
        std::string versionStr = Ort::GetVersionString();
        std::wcout << L"ONNX Runtime Version: " << std::wstring(versionStr.begin(), versionStr.end()).c_str() << std::endl;
        auto sessionOptions = InferenceEngine::CreateSessionOptions(options, env);

        // Determine model paths - look in the executable directory (files are copied there during build)
        std::wstring executablePath = ModelManager::GetExecutablePath();
        std::filesystem::path executableFolder = std::filesystem::path(executablePath).parent_path();
        std::filesystem::path modelPath;
        std::filesystem::path labelsPath;

        if (options.use_model_catalog)
        {
            // Build the catalog source from the sample catalog JSON copied next to the executable
            std::wcout << L"Using model catalog..." << std::endl;
            auto sampleCatalogJsonPath = executableFolder / L"SqueezeNetModelCatalog.json";
            auto uri = winrt::Windows::Foundation::Uri(sampleCatalogJsonPath.c_str());
            auto sampleCatalogSource = winrt::Microsoft::Windows::AI::MachineLearning::ModelCatalogSource::CreateFromUriAsync(uri).get();
            winrt::Microsoft::Windows::AI::MachineLearning::ModelCatalog modelCatalog({sampleCatalogSource});

            // Use intelligent model variant selection based on execution provider and device capabilities
            ModelVariant actualVariant = ModelManager::DetermineModelVariant(options, env);
            winrt::Microsoft::Windows::AI::MachineLearning::CatalogModelInfo modelFromCatalog{nullptr};
            std::wstring modelVariantName = (actualVariant == ModelVariant::FP32) ? L"squeezenet-fp32" : L"squeezenet";
            modelFromCatalog = modelCatalog.FindModelAsync(modelVariantName.c_str()).get();

            if (modelFromCatalog != nullptr)
            {
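                // GetInstanceAsync fetches the model if it is not already available locally;
                // the Progress handler reports the download percentage as it streams in.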
                auto catalogModelInstanceOp = modelFromCatalog.GetInstanceAsync({});
                catalogModelInstanceOp.Progress([](auto const& /*operation*/, double progress) {
                    std::wcout << L"Model download progress: " << progress << L"%\r";
                });
                auto catalogModelInstanceResult = co_await catalogModelInstanceOp;
                if (catalogModelInstanceResult.Status() == winrt::Microsoft::Windows::AI::MachineLearning::CatalogModelInstanceStatus::Available)
                {
                    auto catalogModelInstance = catalogModelInstanceResult.GetInstance();
                    auto modelPaths = catalogModelInstance.ModelPaths();
                    auto modelFolderPath = std::filesystem::path(modelPaths.GetAt(0).c_str());
                    std::wstring modelName = modelVariantName + L".onnx";
                    modelPath = modelFolderPath / modelName;
                    std::wcout << L"Using model from catalog at: " << modelPath.c_str() << std::endl;

                    // Get labels
                    labelsPath = modelFolderPath / L"SqueezeNet.Labels.txt";
                }
                else
                {
                    std::wcout << L"Model download failed. Falling back to executableFolder" << std::endl;
                    modelPath = ModelManager::GetModelVariantPath(executableFolder, actualVariant);
                }
            }
            else
            {
                std::wcout << L"Model with alias or ID '" << modelVariantName.c_str() << L"' not found in catalog. Falling back to executableFolder" << std::endl;
                modelPath = ModelManager::GetModelVariantPath(executableFolder, actualVariant);
            }
        }
        else if (options.model_path.empty())
        {
            // Use intelligent model variant selection based on execution provider and device capabilities
            ModelVariant actualVariant = ModelManager::DetermineModelVariant(options, env);
            modelPath = ModelManager::GetModelVariantPath(executableFolder, actualVariant);
        }
        else
        {
            // Use user-specified model path
            modelPath = std::filesystem::path(options.model_path);
        }

        if (labelsPath.empty())
        {
            labelsPath = executableFolder / "SqueezeNet.Labels.txt";
        }

        // Load labels
        std::vector<std::string> labels = ModelManager::LoadLabels(labelsPath);
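
        // Default artifact locations are next to the executable: outputPath is handed to
        // DetermineModelPath (for example, when writing a compiled "_ctx.onnx" model) and
        // imagePath is the bundled sample image, unless overridden on the command line.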
        std::filesystem::path outputPath =
            options.output_path.empty() ? executableFolder / L"SqueezeNet_ctx.onnx" : std::filesystem::path(options.output_path);
        std::filesystem::path imagePath =
            options.image_path.empty() ? executableFolder / L"image.png" : std::filesystem::path(options.image_path);

        // Determine the actual model to use
        std::filesystem::path actualModelPath =
            InferenceEngine::DetermineModelPath(options, modelPath, outputPath, sessionOptions, env);

        // Create session
        std::wcout << L"Loading model: " << actualModelPath.wstring().c_str() << std::endl;
        Ort::Session session(env, actualModelPath.c_str(), sessionOptions);

        // Set "Efficient" mode for MaxEfficiency performance mode
        // NOTE: Only affects NPU and currently only supported for QNN EP and OpenVINO EP
        if (options.perf_mode == PerformanceMode::MaxEfficiency)
        {
            const char* keys[] = {"ep.dynamic.workload_type"};
            const char* values[] = {"Efficient"};
            session.SetEpDynamicOptions(keys, values, 1);
        }
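
        // The code below assumes the model has a single input and a single output (index 0),
        // as is the case for the SqueezeNet classifier used by this sample.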
        // Get model input details
        Ort::AllocatorWithDefaultOptions allocator;
        auto inputName = session.GetInputNameAllocated(0, allocator);
        auto outputName = session.GetOutputNameAllocated(0, allocator);
        auto inputTypeInfo = session.GetInputTypeInfo(0);
        auto inputTensorInfo = inputTypeInfo.GetTensorTypeAndShapeInfo();
        std::vector<int64_t> inputShape = InferenceEngine::PrepareInputShape(inputTensorInfo.GetShape());

        // Load and process image
        ImageProcessor imageProcessor;
        auto videoFrame = co_await imageProcessor.LoadImageFileAsync(winrt::hstring{imagePath.wstring()});
        std::vector<float> inputTensorValues = imageProcessor.BindVideoFrameAsTensor(videoFrame);

        // Create input tensor
        auto inputTensor = InferenceEngine::CreateInputTensor(inputTensorValues, inputShape);

        // Run inference
        std::wcout << L"Running inference..." << std::endl;
        auto outputTensors = InferenceEngine::RunInference(session, inputName.get(), outputName.get(), inputTensor);

        // Extract results
        std::vector<float> results = InferenceEngine::ExtractResults(outputTensors);

        if (labels.empty())
        {
            std::wcout << L"Warning: Could not load labels. Using generic labels.\n";
            for (size_t i = 0; i < results.size() && i < 10; ++i)
            {
                labels.push_back("Class " + std::to_string(i));
            }
        }

        ResultProcessor::PrintResults(labels, results);
    }
    catch (std::exception const& ex)
    {
        std::wcout << L"Error: " << std::wstring(ex.what(), ex.what() + strlen(ex.what())).c_str() << std::endl;
    }
}

int wmain(int argc, wchar_t* argv[])
{
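    // Initialize WinRT (a multithreaded apartment by default); required before using WinRT APIs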
    winrt::init_apartment();
    try
    {
        // Parse command line arguments
        CommandLineOptions options;
        if (!ArgumentParser::ParseCommandLineArgs(argc, argv, options))
        {
            return -1;
        }

        // Run the async inference and block until the coroutine completes
        RunInferenceAsync(options).get();

        std::wcout << L"\nPress any key to continue..." << std::endl;
        std::wcin.get();
    }
    catch (...)
    {
        std::wcout << L"An unexpected error occurred." << std::endl;
        return -1;
    }
    return 0;
}