@@ -15,6 +15,7 @@
 #include <unistd.h>
 #include <base64.h>
 #include <fstream>
+#include <future>
 #include <stdexcept>
 #include <samplerate.h>
 #include <semaphore.h>
@@ -407,22 +408,37 @@ class llm_task {
         timer time_total;
         time_total.start();
         try {
-            int llm_ret = 0;
-            auto llm_thread_func = [this, &text, &prompt_text_embeds, &prompt_speech_embeds, &llm_ret]() {
+            int llm_ret = 0;
+            std::promise<int> prom;
+            std::future<int> fut = prom.get_future();
+            auto llm_thread_func = [this, &text, &prompt_text_embeds, &prompt_speech_embeds, &llm_ret, &prom]() {
                 llm_ret = lLaMa_->Run(text, prompt_text_embeds, prompt_speech_embeds, g_token_buffer, g_buffer_mutex,
                                       g_buffer_cv, g_llm_finished);
+                prom.set_value(llm_ret);
             };
             std::thread llm_thread(llm_thread_func);
             llm_thread.detach();
+
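+            // Brief poll: if the LLM fails immediately, surface the error before synthesis starts.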
+            if (fut.wait_for(std::chrono::milliseconds(10)) == std::future_status::ready) {
+                llm_ret = fut.get();
+                if (llm_ret == -1) {
+                    SLOGE("Error, Generate failed");
+                    if (out_callback_) out_callback_("Error, Generate failed", true);
+                    return llm_ret;
+                }
+            }
+
             int prompt_token_len = prompt_speech_embeds_flow.size() / lToken2Wav._attr.flow_embed_size;
             if (prompt_token_len < 75) {
                 SLOGE("Error, prompt speech token len %d < 75", prompt_token_len);
-                if (llm_thread.joinable()) llm_thread.join();
+                if (fut.valid()) fut.wait();  // joinable() is always false after detach(); wait on the future instead
+                if (out_callback_) {
+                    out_callback_("Error, prompt speech token len " + std::to_string(prompt_token_len) + " < 75", true);
+                }
                 return -1;
             }
-            if (llm_ret == -1) {
-                return llm_ret;
-            }
+
             int prompt_token_align_len = 75;
             std::vector<float> prompt_speech_embeds_flow1;
             prompt_speech_embeds_flow1.insert(prompt_speech_embeds_flow1.begin(), prompt_speech_embeds_flow.begin(),
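
The core of this commit is a detach-plus-promise handshake: the worker publishes its return code through a std::promise as its final action, the parent polls the matching std::future for 10 ms to catch an immediate failure, and any early-return path can wait on the future instead of joining a thread that was already detached. Below is a minimal, self-contained sketch of that pattern; run_model, generate, and the 200 ms sleep are hypothetical stand-ins for lLaMa_->Run and its caller, not code from this repository. Note that std::chrono::milliseconds formally comes from <chrono>; most standard libraries expose it via <future>, but the sketch includes it explicitly.

#include <chrono>
#include <future>
#include <iostream>
#include <thread>

// Hypothetical stand-in for lLaMa_->Run(): -1 on immediate failure, 0 on success.
int run_model(bool fail_fast) {
    if (fail_fast) return -1;  // e.g. invalid input rejected up front
    std::this_thread::sleep_for(std::chrono::milliseconds(200));  // "long" generation
    return 0;
}

int generate(bool fail_fast) {
    std::promise<int> prom;
    std::future<int> fut = prom.get_future();

    // The worker publishes its return code as its last action, so a ready
    // future implies the worker is done with everything it captured.
    std::thread worker([&prom, fail_fast]() { prom.set_value(run_model(fail_fast)); });
    worker.detach();

    // Brief poll: an immediate failure becomes visible within 10 ms; a healthy
    // long-running generation simply times out here and the caller proceeds.
    if (fut.wait_for(std::chrono::milliseconds(10)) == std::future_status::ready) {
        int ret = fut.get();  // get() consumes the future (valid() becomes false)
        if (ret == -1) {
            std::cerr << "Error, Generate failed\n";
            return ret;
        }
    }

    // ... streaming / synthesis work would go here ...

    // Before returning, make sure the detached worker has finished with the
    // locals it captured; the guard covers the path where get() already ran.
    if (fut.valid()) fut.wait();
    return 0;
}

int main() {
    std::cout << "fail_fast=true  -> " << generate(true) << '\n';   // -1, caught by the poll
    std::cout << "fail_fast=false -> " << generate(false) << '\n';  // 0, after waiting for the worker
}

Setting the promise as the worker's last statement is what makes fut.wait() a safe synchronization point: once the future is ready, the worker no longer reads the references it captured, so the caller's locals can be destroyed without a dangling-reference hazard.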