// Asio execution context that schedules the optimization coroutine and timers.
29 asio::io_context io_context_;
// Worker pool on which parameter evaluations are co_spawned (see evaluate_parameters).
30 asio::thread_pool thread_pool_;
// Owned ODE integrator; validated non-null in the constructor.
31 std::unique_ptr<diffeq::core::AbstractIntegrator<State>> integrator_;
// Task counters. NOTE(review): only these three members are atomic; the
// remaining mutable state is guarded by optimization_mutex_ where the visible
// code touches it -- except current_iteration_, see below.
34 std::atomic<size_t> active_tasks_{0};
35 std::atomic<size_t> completed_tasks_{0};
36 std::atomic<size_t> total_tasks_{0};
// Current parameter vector and the recorded (parameters, objective) history;
// both are accessed under optimization_mutex_ in the visible code.
39 std::vector<double> current_parameters_;
40 std::vector<std::pair<std::vector<double>,
double>> optimization_history_;
41 std::mutex optimization_mutex_;
// Convergence tolerance on the change of the objective between iterations.
44 double tolerance_{1e-6};
45 size_t max_iterations_{100};
// NOTE(review): plain size_t mutated inside the optimization coroutine while
// potentially readable from other threads -- consider std::atomic<size_t>.
46 size_t current_iteration_{0};
// Constructor (fragment -- the signature line naming the class and the
// integrator/tolerance/max_iterations parameters are outside this view).
// Pool size defaults to the hardware concurrency.
53 size_t thread_count = std::thread::hardware_concurrency())
54 : thread_pool_(thread_count)
55 , integrator_(std::move(integrator)) {
// Fail fast on a null integrator instead of crashing on first use.
// (The guarding if-condition sits on a line missing from this view.)
58 throw std::invalid_argument(
"Integrator cannot be null");
// Store the user-supplied convergence settings.
66 tolerance_ = tolerance;
67 max_iterations_ = max_iterations;
// Public optimization entry point (fragment -- the return type / function
// name line and the initial_state parameter are not visible here). Resets
// the optimizer state and launches the asynchronous optimization loop.
73 template<
typename ObjectiveFunction,
typename ParameterUpdateFunction>
75 const std::vector<double>& initial_params,
76 ObjectiveFunction&& objective,
77 ParameterUpdateFunction&& param_update,
// Optional per-iteration progress callback (parameters, objective value).
78 std::function<
void(
const std::vector<double>&,
double)> callback =
nullptr) {
// Reset state for a fresh run. NOTE(review): these writes are not taken
// under optimization_mutex_ -- safe only if no run is currently active;
// verify the caller contract against the full file.
80 current_parameters_ = initial_params;
81 current_iteration_ = 0;
82 optimization_history_.clear();
// Hand everything to the coroutine-based loop.
85 start_optimization_loop(initial_state,
86 std::forward<ObjectiveFunction>(objective),
87 std::forward<ParameterUpdateFunction>(param_update),
// Drives io_context_ until the optimization completes, or until the optional
// timeout fires (fragment -- the io_context_.run() call, the timer handler
// body, and the accessor signatures are outside this view).
94 void run(std::chrono::milliseconds timeout = std::chrono::milliseconds::max()) {
95 if (timeout != std::chrono::milliseconds::max()) {
96 asio::steady_timer timer(io_context_, timeout);
// NOTE(review): this steady_timer is a local -- it is destroyed (and its
// pending wait cancelled) when this if-block exits unless ownership is
// transferred in the missing lines; verify against the full file.
97 timer.async_wait([
this](
const asio::error_code&) {
// Accessor fragment: returns the recorded (parameters, objective) history.
109 return optimization_history_;
// Accessor fragment: returns a snapshot of the current parameters under the mutex.
116 std::lock_guard<std::mutex> lock(optimization_mutex_);
117 return current_parameters_;
// Core optimization loop, run as an Asio coroutine on io_context_.
// Each iteration: evaluate current parameters on the pool, record the result,
// test convergence, ask param_update for new parameters, invoke the progress
// callback, then yield 10 ms before the next iteration.
124 template<
typename ObjectiveFunction,
typename ParameterUpdateFunction>
125 void start_optimization_loop(
const State& initial_state,
126 ObjectiveFunction&& objective,
127 ParameterUpdateFunction&& param_update,
128 std::function<
void(
const std::vector<double>&,
double)> callback) {
// Move/forward the callables into the coroutine frame so they outlive this call.
130 asio::co_spawn(io_context_,
131 [
this, initial_state, objective = std::forward<ObjectiveFunction>(objective),
132 param_update = std::forward<ParameterUpdateFunction>(param_update),
133 callback = std::move(callback)]()
mutable -> asio::awaitable<void> {
// Sentinel so the first iteration never reports convergence.
135 double previous_objective = std::numeric_limits<double>::max();
// NOTE(review): current_iteration_ is a plain size_t mutated here while
// other threads may read it -- confirm external synchronization.
137 while (current_iteration_ < max_iterations_) {
// Progress log ("优化迭代" = "optimization iteration").
138 std::cout <<
"优化迭代 " << current_iteration_ << std::endl;
// Evaluate the objective on the thread pool and await the result.
141 auto [final_state, objective_value] =
co_await evaluate_parameters(
142 initial_state, current_parameters_, objective);
// Record (parameters, objective) under the mutex.
146 std::lock_guard<std::mutex> lock(optimization_mutex_);
147 optimization_history_.emplace_back(current_parameters_, objective_value);
// Converged once the objective changes by less than tolerance_
// ("优化收敛于迭代" = "converged at iteration").
151 if (std::abs(objective_value - previous_objective) < tolerance_) {
152 std::cout <<
"优化收敛于迭代 " << current_iteration_ << std::endl;
// Ask the user-supplied update rule for the next parameter vector.
157 auto new_params = param_update(current_parameters_, objective_value, final_state);
// Publish the new parameters under the mutex.
160 std::lock_guard<std::mutex> lock(optimization_mutex_);
161 current_parameters_ = std::move(new_params);
// Progress callback -- presumably guarded by a null check in the
// missing lines; verify against the full file.
166 callback(current_parameters_, objective_value);
169 previous_objective = objective_value;
170 ++current_iteration_;
// Yield for 10 ms so other io_context_ work can interleave.
173 asio::steady_timer timer(io_context_, std::chrono::milliseconds(10));
174 co_await timer.async_wait(asio::use_awaitable);
// Final log ("优化完成,总迭代次数" = "finished, total iterations").
177 std::cout <<
"优化完成,总迭代次数: " << current_iteration_ << std::endl;
// Runs one integration plus objective evaluation on the thread pool and
// returns (final state, objective value) to the awaiting coroutine.
184 template<
typename ObjectiveFunction>
185 asio::awaitable<std::pair<State, double>> evaluate_parameters(
186 const State& initial_state,
187 const std::vector<double>& params,
188 ObjectiveFunction&& objective) {
// initial_state and params are copied into the closure so the work is
// self-contained on the pool thread.
190 return co_await asio::co_spawn(thread_pool_,
191 [
this, initial_state, params, objective = std::forward<ObjectiveFunction>(objective)]()
192 mutable -> asio::awaitable<std::pair<State, double>> {
195 State state = initial_state;
196 std::vector<double> local_params = params;
// Integrate from t = 0 to t = 10 with step 0.01.
// NOTE(review): integrator_ is shared mutable state used from pool
// threads without a lock -- safe only if evaluations never overlap.
// NOTE(review): local_params is copied but never visibly applied to the
// integrator/system before integrating -- verify the missing lines
// actually inject the parameters.
199 integrator_->set_time(0.0);
200 integrator_->integrate(state, 0.01, 10.0);
// Score the final state with the user objective.
203 double obj_value = objective(state, local_params, integrator_->current_time());
205 co_return std::make_pair(state, obj_value);
206 }, asio::use_awaitable);
// Example objective for tuning Lorenz-style chaotic dynamics.
// NOTE(review): target_lyapunov_exponents_ is stored but unused by the
// visible operator() -- verify whether the missing lines use it.
226 std::vector<double> target_lyapunov_exponents_;
// Defaults approximate the classic Lorenz attractor's Lyapunov spectrum.
229 ChaosObjective(
const std::vector<double>& target_exponents = {0.9, 0.0, -14.6})
230 : target_lyapunov_exponents_(target_exponents) {}
// Scores a (final_state, params) pair; the leading minus on the stability
// metric rewards large |x|+|y|+|z| under minimization, while the second term
// penalizes drift from the canonical Lorenz parameters (10, 28, 8/3).
232 double operator()(
const std::vector<double>& final_state,
233 const std::vector<double>& params,
234 double final_time)
const {
// Assumes a 3-component state and 3 parameters (sigma, rho, beta);
// no bounds checking on either vector.
238 double x = final_state[0], y = final_state[1], z = final_state[2];
239 double sigma = params[0], rho = params[1], beta = params[2];
242 double stability_metric = std::abs(x) + std::abs(y) + std::abs(z);
243 double parameter_balance = std::abs(sigma - 10.0) + std::abs(rho - 28.0) + std::abs(beta - 8.0/3.0);
246 return -stability_metric + 0.1 * parameter_balance;
// Gradient-descent-style parameter update rule.
253 double learning_rate_;
// Flat [min0, max0, min1, max1, ...] bounds; defaults cover (sigma, rho, beta).
254 std::vector<double> parameter_bounds_;
// Constructor fragment -- the line declaring the lr parameter is outside this view.
258 const std::vector<double>& bounds = {1.0, 50.0, 1.0, 50.0, 1.0, 10.0})
259 : learning_rate_(lr), parameter_bounds_(bounds) {}
// Produces the next parameter vector from the current one.
// NOTE(review): final_state is accepted but unused in the visible code.
261 std::vector<double> operator()(
const std::vector<double>& current_params,
262 double objective_value,
263 const std::vector<double>& final_state)
const {
265 std::vector<double> new_params = current_params;
268 for (
size_t i = 0; i < new_params.size(); ++i) {
// NOTE(review): perturbation is computed here but estimate_gradient
// (below) ignores it -- the "gradient" is a heuristic, not a finite
// difference.
269 double perturbation = 0.01 * new_params[i];
272 double gradient = estimate_gradient(i, current_params, objective_value, perturbation);
// Plain gradient-descent step.
275 new_params[i] -= learning_rate_ * gradient;
// Clamp to [min, max] when bounds exist for this index.
278 if (i * 2 + 1 < parameter_bounds_.size()) {
279 new_params[i] = std::max(parameter_bounds_[i * 2],
280 std::min(parameter_bounds_[i * 2 + 1], new_params[i]));
// Heuristic "gradient" estimate for one parameter.
// NOTE(review): this is NOT a finite-difference gradient -- the perturbation
// argument is ignored, and the value depends only on the parameter's
// magnitude/index and sin(current_objective). Treat as a placeholder
// search direction, not a real derivative.
288 double estimate_gradient(
size_t param_index,
289 const std::vector<double>& params,
290 double current_objective,
291 double perturbation)
const {
296 double param_value = params[param_index];
// Scale down higher-indexed parameters.
297 double normalized_value = param_value / (param_index + 1.0);
// Oscillating pseudo-gradient bounded by 0.1 * |normalized_value|.
299 return normalized_value * std::sin(current_objective) * 0.1;
// Console progress reporter; file_mutex_ serializes output across threads.
// NOTE(review): output_dir_ is stored but unused in the visible code --
// presumably the missing lines write files; verify against the full file.
306 std::string output_dir_;
307 std::mutex file_mutex_;
// Constructor fragment -- the line declaring the output_dir parameter is
// outside this view.
311 : output_dir_(std::move(output_dir)) {}
// Prints the current parameter vector and objective value.
// Log text is Chinese: "参数" = "parameters", "目标值" = "objective value".
313 void visualize_progress(
const std::vector<double>& params,
double objective_value) {
314 std::lock_guard<std::mutex> lock(file_mutex_);
320 std::cout <<
"参数: [";
// Comma-separated parameter list.
321 for (
size_t i = 0; i < params.size(); ++i) {
322 std::cout << params[i];
// NOTE(review): params.size() - 1 underflows for an empty vector, but the
// loop body is never entered when empty, so this is harmless here.
323 if (i < params.size() - 1) std::cout <<
", ";
325 std::cout <<
"], 目标值: " << objective_value << std::endl;
// Dumps the full optimization history to stdout (despite the name, the
// visible code does not write to a file).
// Log text: "优化历史" = "optimization history", "迭代" = "iteration",
// "目标值" = "objective value", "参数" = "parameters".
// NOTE(review): indexes params[0..2] directly -- assumes at least 3 params
// per history entry; verify callers always satisfy that.
328 void save_final_results(
const std::vector<std::pair<std::vector<double>,
double>>& history) {
329 std::lock_guard<std::mutex> lock(file_mutex_);
331 std::cout <<
"\n=== 优化历史 ===" << std::endl;
332 for (
size_t i = 0; i < history.size(); ++i) {
// Structured binding over the (parameters, objective) pair.
333 const auto& [params, obj_value] = history[i];
334 std::cout <<
"迭代 " << i <<
": 目标值 = " << obj_value
335 <<
", 参数 = [" << params[0] <<
", " << params[1] <<
", " << params[2] <<
"]" << std::endl;