DiffEq - Modern C++ ODE Integration Library 1.0.0
High-performance C++ library for solving ODEs with async signal processing
advanced_asio_integration.cpp
#include <diffeq.hpp>
#include <boost/asio.hpp>
#include <boost/asio/thread_pool.hpp>
#include <boost/asio/co_spawn.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/use_awaitable.hpp>
#include <boost/asio/steady_timer.hpp>
#include <iostream>
#include <vector>
#include <memory>
#include <chrono>
#include <random>
#include <algorithm>
#include <numeric>
#include <fstream>
#include <mutex>
#include <atomic>
#include <cmath>
#include <functional>
#include <limits>
#include <optional>
#include <string>
#include <thread>

namespace asio = boost::asio;

// Advanced asynchronous integration manager with support for adaptive
// parameter optimization.
template<typename State>
class AdvancedAsioIntegrationManager {
private:
    asio::io_context io_context_;
    asio::thread_pool thread_pool_;
    std::unique_ptr<diffeq::core::AbstractIntegrator<State>> integrator_;

    // Task bookkeeping
    std::atomic<size_t> active_tasks_{0};
    std::atomic<size_t> completed_tasks_{0};
    std::atomic<size_t> total_tasks_{0};

    // Parameter optimization state
    std::vector<double> current_parameters_;
    std::vector<std::pair<std::vector<double>, double>> optimization_history_;
    mutable std::mutex optimization_mutex_;  // mutable so const getters can lock it

    // Convergence control
    double tolerance_{1e-6};
    size_t max_iterations_{100};
    size_t current_iteration_{0};
public:
    // Constructor: takes ownership of the integrator and sizes the worker pool.
    AdvancedAsioIntegrationManager(
        std::unique_ptr<diffeq::core::AbstractIntegrator<State>> integrator,
        size_t thread_count = std::thread::hardware_concurrency())
        : thread_pool_(thread_count)
        , integrator_(std::move(integrator)) {

        if (!integrator_) {
            throw std::invalid_argument("Integrator cannot be null");
        }
    }

    // Set the convergence tolerance and the iteration limit.
    void set_optimization_parameters(double tolerance, size_t max_iterations) {
        tolerance_ = tolerance;
        max_iterations_ = max_iterations;
    }

    // Start adaptive parameter optimization. Returns immediately; the work is
    // scheduled on the io_context and driven by run().
    template<typename ObjectiveFunction, typename ParameterUpdateFunction>
    void optimize_parameters_async(const State& initial_state,
                                   const std::vector<double>& initial_params,
                                   ObjectiveFunction&& objective,
                                   ParameterUpdateFunction&& param_update,
                                   std::function<void(const std::vector<double>&, double)> callback = nullptr) {

        current_parameters_ = initial_params;
        current_iteration_ = 0;
        optimization_history_.clear();

        // Kick off the optimization loop.
        start_optimization_loop(initial_state,
                                std::forward<ObjectiveFunction>(objective),
                                std::forward<ParameterUpdateFunction>(param_update),
                                std::move(callback));
    }

    // Run the event loop, optionally stopping after a timeout.
    void run(std::chrono::milliseconds timeout = std::chrono::milliseconds::max()) {
        // The timer must outlive the call to run(); a timer local to the
        // if-block would be destroyed (cancelling its wait) before run() starts.
        std::optional<asio::steady_timer> timer;
        if (timeout != std::chrono::milliseconds::max()) {
            timer.emplace(io_context_, timeout);
            timer->async_wait([this](const boost::system::error_code&) {
                io_context_.stop();
            });
        }

        io_context_.run();
    }

    // Get the optimization history (intended to be read after run() returns).
    const std::vector<std::pair<std::vector<double>, double>>& get_optimization_history() const {
        return optimization_history_;
    }

    // Get a thread-safe snapshot of the current parameters.
    std::vector<double> get_current_parameters() const {
        std::lock_guard<std::mutex> lock(optimization_mutex_);
        return current_parameters_;
    }

private:
    // Drive the optimization loop as a coroutine on the io_context.
    template<typename ObjectiveFunction, typename ParameterUpdateFunction>
    void start_optimization_loop(const State& initial_state,
                                 ObjectiveFunction&& objective,
                                 ParameterUpdateFunction&& param_update,
                                 std::function<void(const std::vector<double>&, double)> callback) {

        asio::co_spawn(io_context_,
            [this, initial_state, objective = std::forward<ObjectiveFunction>(objective),
             param_update = std::forward<ParameterUpdateFunction>(param_update),
             callback = std::move(callback)]() mutable -> asio::awaitable<void> {

                double previous_objective = std::numeric_limits<double>::max();

                while (current_iteration_ < max_iterations_) {
                    std::cout << "Optimization iteration " << current_iteration_ << std::endl;

                    // Integrate with the current parameters.
                    auto [final_state, objective_value] = co_await evaluate_parameters(
                        initial_state, current_parameters_, objective);

                    // Record the history.
                    {
                        std::lock_guard<std::mutex> lock(optimization_mutex_);
                        optimization_history_.emplace_back(current_parameters_, objective_value);
                    }

                    // Check for convergence.
                    if (std::abs(objective_value - previous_objective) < tolerance_) {
                        std::cout << "Optimization converged at iteration " << current_iteration_ << std::endl;
                        break;
                    }

                    // Update the parameters.
                    auto new_params = param_update(current_parameters_, objective_value, final_state);

                    {
                        std::lock_guard<std::mutex> lock(optimization_mutex_);
                        current_parameters_ = std::move(new_params);
                    }

                    // Invoke the progress callback.
                    if (callback) {
                        callback(current_parameters_, objective_value);
                    }

                    previous_objective = objective_value;
                    ++current_iteration_;

                    // Brief pause so the loop does not monopolize the event loop.
                    asio::steady_timer timer(io_context_, std::chrono::milliseconds(10));
                    co_await timer.async_wait(asio::use_awaitable);
                }

                std::cout << "Optimization finished; total iterations: " << current_iteration_ << std::endl;
            }, asio::detached);
    }

    // Evaluate one parameter set: run the integration on the thread pool (so
    // the io_context stays responsive) and compute the objective value.
    template<typename ObjectiveFunction>
    asio::awaitable<std::pair<State, double>> evaluate_parameters(
        const State& initial_state,
        const std::vector<double>& params,
        ObjectiveFunction&& objective) {

        co_return co_await asio::co_spawn(thread_pool_,
            [this, initial_state, params, objective = std::forward<ObjectiveFunction>(objective)]()
                mutable -> asio::awaitable<std::pair<State, double>> {

                // Copy the state and parameters so the worker owns its data.
                State state = initial_state;
                std::vector<double> local_params = params;

                // Run the integration.
                integrator_->set_time(0.0);
                integrator_->integrate(state, 0.01, 10.0);

                // Evaluate the objective function.
                double obj_value = objective(state, local_params, integrator_->current_time());

                co_return std::make_pair(state, obj_value);
            }, asio::use_awaitable);
    }
};

// Example: definition of a complex dynamical system
struct LorenzSystem {
    double sigma, rho, beta;

    LorenzSystem(double s, double r, double b) : sigma(s), rho(r), beta(b) {}

    void operator()(std::vector<double>& dx, const std::vector<double>& x, double t) const {
        dx[0] = sigma * (x[1] - x[0]);
        dx[1] = x[0] * (rho - x[2]) - x[1];
        dx[2] = x[0] * x[1] - beta * x[2];
    }
};
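
// The classic chaotic regime of the Lorenz system is sigma = 10, rho = 28,
// beta = 8/3; ChaosObjective below penalizes distance from these values, and
// main() starts the search from nearby parameters.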

// Example: objective function - searching for chaotic behavior
class ChaosObjective {
private:
    std::vector<double> target_lyapunov_exponents_;

public:
    ChaosObjective(const std::vector<double>& target_exponents = {0.9, 0.0, -14.6})
        : target_lyapunov_exponents_(target_exponents) {}

    double operator()(const std::vector<double>& final_state,
                      const std::vector<double>& params,
                      double final_time) const {
        // Simplified Lyapunov-exponent estimate; a real application would
        // compute the actual Lyapunov exponents here.

        double x = final_state[0], y = final_state[1], z = final_state[2];
        double sigma = params[0], rho = params[1], beta = params[2];

        // Stability analysis based on the system parameters.
        double stability_metric = std::abs(x) + std::abs(y) + std::abs(z);
        double parameter_balance = std::abs(sigma - 10.0) + std::abs(rho - 28.0) + std::abs(beta - 8.0/3.0);

        // Goal: maximize chaotic behavior (a large stability_metric) while
        // keeping the parameters close to the classic values.
        return -stability_metric + 0.1 * parameter_balance;
    }
};
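
// Note: a genuine Lyapunov-exponent computation would integrate the tangent
// (variational) system alongside the trajectory, periodically re-orthonormalize
// the tangent vectors, and average the logarithmic growth rates; the heuristic
// above only stands in for that.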

// Example: parameter update strategy - gradient descent
class GradientDescentOptimizer {
private:
    double learning_rate_;
    std::vector<double> parameter_bounds_;

public:
    GradientDescentOptimizer(double lr = 0.1,
                             const std::vector<double>& bounds = {1.0, 50.0, 1.0, 50.0, 1.0, 10.0})
        : learning_rate_(lr), parameter_bounds_(bounds) {}

    std::vector<double> operator()(const std::vector<double>& current_params,
                                   double objective_value,
                                   const std::vector<double>& final_state) const {

        std::vector<double> new_params = current_params;

        // Simplified gradient estimate (a real application would use proper
        // numerical differentiation).
        for (size_t i = 0; i < new_params.size(); ++i) {
            double perturbation = 0.01 * new_params[i];

            // Estimate the numerical gradient.
            double gradient = estimate_gradient(i, current_params, objective_value, perturbation);

            // Take a gradient-descent step.
            new_params[i] -= learning_rate_ * gradient;

            // Apply the bound constraints (stored as consecutive [min, max] pairs).
            if (i * 2 + 1 < parameter_bounds_.size()) {
                new_params[i] = std::max(parameter_bounds_[i * 2],
                                         std::min(parameter_bounds_[i * 2 + 1], new_params[i]));
            }
        }

        return new_params;
    }

private:
    double estimate_gradient(size_t param_index,
                             const std::vector<double>& params,
                             double current_objective,
                             double perturbation) const {
        // Simplified gradient estimate; a real application would perform
        // additional objective evaluations here.

        // Heuristic based on the parameter's influence on the objective.
        double param_value = params[param_index];
        double normalized_value = param_value / (param_index + 1.0);

        return normalized_value * std::sin(current_objective) * 0.1;
    }
};
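
// Illustrative sketch only (not part of the library API): the "proper numerical
// differentiation" mentioned above could be a central-difference estimate like
// this, assuming the objective can be re-evaluated for each perturbed parameter
// set (in this example, each call to f would mean one integration run).
template<typename Objective>
std::vector<double> central_difference_gradient(Objective&& f,
                                                std::vector<double> params,
                                                double rel_step = 1e-3) {
    std::vector<double> grad(params.size(), 0.0);
    for (size_t i = 0; i < params.size(); ++i) {
        const double h = rel_step * std::max(std::abs(params[i]), 1.0);
        const double saved = params[i];
        params[i] = saved + h;
        const double f_plus = f(params);   // objective at params[i] + h
        params[i] = saved - h;
        const double f_minus = f(params);  // objective at params[i] - h
        params[i] = saved;                 // restore the original value
        grad[i] = (f_plus - f_minus) / (2.0 * h);
    }
    return grad;
}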

// Example: results visualizer
class OptimizationVisualizer {
private:
    std::string output_dir_;
    std::mutex file_mutex_;

public:
    explicit OptimizationVisualizer(std::string output_dir = "optimization_results/")
        : output_dir_(std::move(output_dir)) {}

    void visualize_progress(const std::vector<double>& params, double objective_value) {
        std::lock_guard<std::mutex> lock(file_mutex_);

        // Create the output directory (in a real application):
        // std::filesystem::create_directories(output_dir_);

        // Log the parameters and the objective value.
        std::cout << "Parameters: [";
        for (size_t i = 0; i < params.size(); ++i) {
            std::cout << params[i];
            if (i < params.size() - 1) std::cout << ", ";
        }
        std::cout << "], objective: " << objective_value << std::endl;
    }

    void save_final_results(const std::vector<std::pair<std::vector<double>, double>>& history) {
        std::lock_guard<std::mutex> lock(file_mutex_);

        std::cout << "\n=== Optimization history ===" << std::endl;
        for (size_t i = 0; i < history.size(); ++i) {
            const auto& [params, obj_value] = history[i];
            std::cout << "Iteration " << i << ": objective = " << obj_value
                      << ", parameters = [" << params[0] << ", " << params[1] << ", " << params[2] << "]" << std::endl;
        }
    }
};

int main() {
    std::cout << "=== Advanced Boost.Asio Integrator Integration Example ===" << std::endl;
    std::cout << "Adaptive parameter optimization system" << std::endl;

    // Create the integrator.
    auto integrator = std::make_unique<diffeq::RK4Integrator<std::vector<double>>>();

    // Create the advanced async manager.
    AdvancedAsioIntegrationManager<std::vector<double>> manager(std::move(integrator), 4);

    // Set the optimization parameters.
    manager.set_optimization_parameters(1e-4, 20);  // tolerance and max iterations

    // Create the optimization components.
    ChaosObjective objective;
    GradientDescentOptimizer optimizer(0.05);
    OptimizationVisualizer visualizer;

    // Initial parameters (near the classic Lorenz values).
    std::vector<double> initial_params = {8.0, 25.0, 2.5};  // sigma, rho, beta
    std::vector<double> initial_state = {1.0, 1.0, 1.0};    // x, y, z

    std::cout << "\nStarting parameter optimization..." << std::endl;
    std::cout << "Initial parameters: [" << initial_params[0] << ", " << initial_params[1] << ", " << initial_params[2] << "]" << std::endl;

    // Launch the asynchronous optimization.
    manager.optimize_parameters_async(
        initial_state,
        initial_params,
        objective,
        optimizer,
        [&visualizer](const std::vector<double>& params, double obj_value) {
            visualizer.visualize_progress(params, obj_value);
        }
    );

    // Run the event loop.
    auto start_time = std::chrono::high_resolution_clock::now();
    manager.run();
    auto end_time = std::chrono::high_resolution_clock::now();

    // Report results.
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time);
    std::cout << "\n=== Optimization complete ===" << std::endl;
    std::cout << "Total time: " << duration.count() << "ms" << std::endl;

    // Report the final parameters.
    auto final_params = manager.get_current_parameters();
    std::cout << "Final parameters: [" << final_params[0] << ", " << final_params[1] << ", " << final_params[2] << "]" << std::endl;

    // Save the optimization history.
    const auto& history = manager.get_optimization_history();
    visualizer.save_final_results(history);

    // Validate the final parameters.
    std::cout << "\n=== Validating final parameters ===" << std::endl;
    LorenzSystem final_system(final_params[0], final_params[1], final_params[2]);
    std::cout << "Lorenz system parameters: σ=" << final_params[0]
              << ", ρ=" << final_params[1]
              << ", β=" << final_params[2] << std::endl;

    return 0;
}
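
A build along these lines may work, assuming a C++20 compiler with coroutine support and Boost 1.75 or newer on the include path (the diffeq include directory shown is illustrative): g++ -std=c++20 -I/path/to/diffeq/include advanced_asio_integration.cpp -pthread -o advanced_asio_integration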