use a shared async runtime instead of recreating each time

Damien Elmes 2020-06-02 09:35:27 +10:00
parent 2147f75bc5
commit ac219ae728
2 changed files with 23 additions and 7 deletions
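
For readers skimming the diff: the change lazily builds one multi-threaded tokio runtime, stores it on the Backend struct, and hands out clones of its Handle so the blocking sync entry points can call block_on against a shared thread pool instead of constructing and tearing down a fresh Runtime on every call. Below is a minimal, self-contained sketch of that pattern, not Anki code: SyncService and do_blocking_work are illustrative stand-ins, and it assumes tokio 0.2.21 with features along the lines of the Cargo.toml change further down (Handle::block_on arrived late in the 0.2 line, which is presumably part of why the version is bumped).

use tokio::runtime::{self, Runtime};

// Illustrative stand-in for the real Backend struct.
struct SyncService {
    // Lazily-created runtime shared by all blocking sync calls.
    runtime: Option<Runtime>,
}

impl SyncService {
    fn new() -> Self {
        SyncService { runtime: None }
    }

    // Build the runtime on first use; later calls reuse the same thread pool
    // instead of paying construction/teardown on every operation.
    fn runtime_handle(&mut self) -> runtime::Handle {
        if self.runtime.is_none() {
            self.runtime = Some(
                runtime::Builder::new()
                    .threaded_scheduler() // needs the "rt-threaded" feature
                    .core_threads(1)
                    .enable_all()
                    .build()
                    .unwrap(),
            );
        }
        self.runtime.as_ref().unwrap().handle().clone()
    }

    // Stand-in for the sync_*_inner methods: grab a handle, then drive a
    // future to completion on the shared runtime from synchronous code.
    fn do_blocking_work(&mut self) -> i32 {
        let rt = self.runtime_handle();
        rt.block_on(async { 40 + 2 }) // Handle::block_on needs tokio 0.2.21+
    }
}

fn main() {
    let mut svc = SyncService::new();
    assert_eq!(svc.do_blocking_work(), 42);
    assert_eq!(svc.do_blocking_work(), 42); // second call reuses the same runtime
}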

View File

@@ -23,7 +23,7 @@ unicode-normalization = "0.1.12"
 tempfile = "3.1.0"
 serde = "1.0.104"
 serde_json = "1.0.45"
-tokio = { version = "0.2.11", features = ["fs"] }
+tokio = { version = "0.2.21", features = ["fs", "rt-threaded"] }
 serde_derive = "1.0.104"
 zip = "0.5.4"
 serde_tuple = "0.4.0"

View File

@@ -54,7 +54,7 @@ use std::{
     result,
     sync::{Arc, Mutex},
 };
-use tokio::runtime::Runtime;
+use tokio::runtime::{self, Runtime};
 
 mod dbproxy;
@@ -91,6 +91,7 @@ pub struct Backend {
     sync_abort: Option<AbortHandle>,
     media_sync_abort: Option<AbortHandle>,
     progress_state: Arc<Mutex<ProgressState>>,
+    runtime: Option<Runtime>,
 }
 
 #[derive(Clone, Copy)]
@@ -1155,6 +1156,7 @@ impl Backend {
                 want_abort: false,
                 last_progress: None,
             })),
+            runtime: None,
         }
     }
@@ -1203,6 +1205,20 @@ impl Backend {
         }
     }
 
+    fn runtime_handle(&mut self) -> runtime::Handle {
+        if self.runtime.is_none() {
+            self.runtime = Some(
+                runtime::Builder::new()
+                    .threaded_scheduler()
+                    .core_threads(1)
+                    .enable_all()
+                    .build()
+                    .unwrap(),
+            )
+        }
+        self.runtime.as_ref().unwrap().handle().clone()
+    }
+
     fn sync_media_inner(
         &mut self,
         input: pb::SyncAuth,
@@ -1217,7 +1233,7 @@ impl Backend {
         let progress_fn = move |progress| handler.update(progress, true);
 
         let mgr = MediaManager::new(&folder, &db)?;
-        let mut rt = Runtime::new().unwrap();
+        let rt = self.runtime_handle();
         let sync_fut = mgr.sync_media(progress_fn, input.host_number, &input.hkey, log);
         let abortable_sync = Abortable::new(sync_fut, abort_reg);
         let ret = match rt.block_on(abortable_sync) {
@@ -1235,7 +1251,7 @@ impl Backend {
         let (abort_handle, abort_reg) = AbortHandle::new_pair();
         self.sync_abort = Some(abort_handle);
 
-        let mut rt = Runtime::new().unwrap();
+        let rt = self.runtime_handle();
         let sync_fut = sync_login(&input.username, &input.password);
         let abortable_sync = Abortable::new(sync_fut, abort_reg);
         let ret = match rt.block_on(abortable_sync) {
@@ -1257,7 +1273,7 @@ impl Backend {
         let (abort_handle, abort_reg) = AbortHandle::new_pair();
         self.sync_abort = Some(abort_handle);
 
-        let mut rt = Runtime::new().unwrap();
+        let rt = self.runtime_handle();
         let input_copy = input.clone();
 
         let ret = self.with_col(|col| {
@@ -1298,6 +1314,8 @@ impl Backend {
     }
 
     fn full_sync_inner(&mut self, input: pb::SyncAuth, upload: bool) -> Result<()> {
+        let rt = self.runtime_handle();
+
         let mut col = self.col.lock().unwrap();
         if col.is_none() {
             return Err(AnkiError::CollectionNotOpen);
@@ -1321,8 +1339,6 @@ impl Backend {
             handler.update(progress, throttle);
         };
 
-        let mut rt = Runtime::new().unwrap();
-
         let result = if upload {
             let sync_fut = col_inner.full_upload(input.into(), progress_fn);
             let abortable_sync = Abortable::new(sync_fut, abort_reg);