// Copyright: Ankitects Pty Ltd and contributors
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html

use std::collections::HashSet;
use std::sync::Arc;

use anki_i18n::I18n;
use anki_proto::notetypes::stock_notetype::OriginalStockKind;
use anki_proto::notetypes::ImageOcclusionField;
use itertools::Itertools;
use tracing::debug;

use crate::collection::Collection;
use crate::config::SchedulerVersion;
use crate::error::AnkiError;
use crate::error::DbError;
use crate::error::DbErrorKind;
use crate::error::Result;
use crate::notetype::all_stock_notetypes;
use crate::notetype::AlreadyGeneratedCardInfo;
use crate::notetype::CardGenContext;
use crate::notetype::Notetype;
use crate::notetype::NotetypeId;
use crate::notetype::NotetypeKind;
use crate::prelude::*;
use crate::progress::ThrottlingProgressHandler;
use crate::timestamp::TimestampMillis;
use crate::timestamp::TimestampSecs;
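
/// Counts of the problems that were found and repaired during a database check.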
#[derive(Debug, Default, PartialEq, Eq)]
pub struct CheckDatabaseOutput {
    card_properties_invalid: usize,
    card_position_too_high: usize,
    cards_missing_note: usize,
    decks_missing: usize,
    revlog_properties_invalid: usize,
    templates_missing: usize,
    card_ords_duplicated: usize,
    field_count_mismatch: usize,
    notetypes_recovered: usize,
    invalid_utf8: usize,
    invalid_ids: usize,
}
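
/// Progress stages reported as the database check runs.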
#[derive(Debug, Clone, Copy, Default)]
pub enum DatabaseCheckProgress {
    #[default]
    Integrity,
    Optimize,
    Cards,
    Notes {
        current: usize,
        total: usize,
    },
    History,
}

impl CheckDatabaseOutput {
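    /// Convert the non-zero counts into translated, user-visible problem
    /// descriptions.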
    pub fn to_i18n_strings(&self, tr: &I18n) -> Vec<String> {
        let mut probs = Vec::new();

        if self.notetypes_recovered > 0 {
            probs.push(tr.database_check_notetypes_recovered());
        }
        if self.card_position_too_high > 0 {
            probs.push(tr.database_check_new_card_high_due(self.card_position_too_high));
        }
        if self.card_properties_invalid > 0 {
            probs.push(tr.database_check_card_properties(self.card_properties_invalid));
        }
        if self.cards_missing_note > 0 {
            probs.push(tr.database_check_card_missing_note(self.cards_missing_note));
        }
        if self.decks_missing > 0 {
            probs.push(tr.database_check_missing_decks(self.decks_missing));
        }
        if self.field_count_mismatch > 0 {
            probs.push(tr.database_check_field_count(self.field_count_mismatch));
        }
        if self.card_ords_duplicated > 0 {
            probs.push(tr.database_check_duplicate_card_ords(self.card_ords_duplicated));
        }
        if self.templates_missing > 0 {
            probs.push(tr.database_check_missing_templates(self.templates_missing));
        }
        if self.revlog_properties_invalid > 0 {
            probs.push(tr.database_check_revlog_properties(self.revlog_properties_invalid));
        }
        if self.invalid_utf8 > 0 {
            probs.push(tr.database_check_notes_with_invalid_utf8(self.invalid_utf8));
        }
        if self.invalid_ids > 0 {
            probs.push(tr.database_check_fixed_invalid_ids(self.invalid_ids));
        }

        probs.into_iter().map(Into::into).collect()
    }
}

impl Collection {
    /// Check the database, returning a list of problems that were fixed.
    pub(crate) fn check_database(&mut self) -> Result<CheckDatabaseOutput> {
        let mut progress = self.new_progress_handler();
        progress.set(DatabaseCheckProgress::Integrity)?;
        debug!("quick check");
        if self.storage.quick_check_corrupt() {
            debug!("quick check failed");
            return Err(AnkiError::db_error(
                self.tr.database_check_corrupt(),
                DbErrorKind::Corrupt,
            ));
        }

        progress.set(DatabaseCheckProgress::Optimize)?;
        debug!("optimize");
        self.storage.optimize()?;

        self.transact_no_undo(|col| col.check_database_inner(progress))
    }
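
    /// Run the individual check/repair steps; called by `check_database()`
    /// inside a no-undo transaction.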
    fn check_database_inner(
        &mut self,
        mut progress: ThrottlingProgressHandler<DatabaseCheckProgress>,
    ) -> Result<CheckDatabaseOutput> {
        let mut out = CheckDatabaseOutput::default();

        // cards first, as we need to be able to read them to process notes
        progress.set(DatabaseCheckProgress::Cards)?;
        debug!("check cards");
        self.check_card_properties(&mut out)?;
        self.check_orphaned_cards(&mut out)?;

        debug!("check decks");
        self.check_missing_deck_ids(&mut out)?;
        self.check_filtered_cards(&mut out)?;

        debug!("check notetypes");
        self.check_notetypes(&mut out, &mut progress)?;

        progress.set(DatabaseCheckProgress::History)?;

        debug!("check review log");
        self.check_revlog(&mut out)?;

        debug!("missing decks");
        self.check_missing_deck_names(&mut out)?;

        self.update_next_new_position()?;

        debug!("invalid ids");
        out.invalid_ids = self.maybe_fix_invalid_ids()?;

        debug!("db check finished: {:#?}", out);

        Ok(out)
    }
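
    /// Fix invalid card properties, counting new cards with too-high due
    /// positions separately from other repairs.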
    fn check_card_properties(&mut self, out: &mut CheckDatabaseOutput) -> Result<()> {
        let timing = self.timing_today()?;
        let (new_cnt, other_cnt) = self.storage.fix_card_properties(
            timing.days_elapsed,
            TimestampSecs::now(),
            self.usn()?,
            self.scheduler_version() == SchedulerVersion::V1,
        )?;
        out.card_position_too_high = new_cnt;
        out.card_properties_invalid += other_cnt;
        Ok(())
    }
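
    /// Remove cards whose note no longer exists.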
    fn check_orphaned_cards(&mut self, out: &mut CheckDatabaseOutput) -> Result<()> {
        let cnt = self.storage.delete_orphaned_cards()?;
        if cnt > 0 {
            self.set_schema_modified()?;
            out.cards_missing_note = cnt;
        }
        Ok(())
    }
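
    /// Recreate any decks that are referenced but no longer exist.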
    fn check_missing_deck_ids(&mut self, out: &mut CheckDatabaseOutput) -> Result<()> {
        let usn = self.usn()?;
        for did in self.storage.missing_decks()? {
            self.recover_missing_deck(did, usn)?;
            out.decks_missing += 1;
        }
        Ok(())
    }
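
    /// Clear filtered-deck state (original deck and due) from cards whose deck
    /// is not actually filtered.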
    fn check_filtered_cards(&mut self, out: &mut CheckDatabaseOutput) -> Result<()> {
        let decks = self.storage.get_decks_map()?;

        let mut wrong = 0;
        for (cid, did) in self.storage.all_filtered_cards_by_deck()? {
            // we expect calling code to ensure all decks already exist
            if let Some(deck) = decks.get(&did) {
                if !deck.is_filtered() {
                    let mut card = self.storage.get_card(cid)?.unwrap();
                    card.original_deck_id.0 = 0;
                    card.original_due = 0;
                    self.storage.update_card(&card)?;
                    wrong += 1;
                }
            }
        }

        if wrong > 0 {
            self.set_schema_modified()?;
            out.card_properties_invalid += wrong;
        }

        Ok(())
    }
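
    /// Check every note against its notetype: recover missing notetypes, fix
    /// field counts, drop duplicate or out-of-range cards, and regenerate any
    /// missing cards.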
    fn check_notetypes(
        &mut self,
        out: &mut CheckDatabaseOutput,
        progress: &mut ThrottlingProgressHandler<DatabaseCheckProgress>,
    ) -> Result<()> {
        let nids_by_notetype = self.storage.all_note_ids_by_notetype()?;
        let norm = self.get_config_bool(BoolKey::NormalizeNoteText);
        let usn = self.usn()?;
        let stamp_millis = TimestampMillis::now();
        let stamp_secs = TimestampSecs::now();

        let expanded_tags = self.storage.expanded_tags()?;
        self.storage.clear_all_tags()?;

        let total_notes = self.storage.total_notes()?;

        progress.set(DatabaseCheckProgress::Notes {
            current: 0,
            total: total_notes as usize,
        })?;
        for (ntid, group) in &nids_by_notetype.into_iter().group_by(|tup| tup.0) {
            debug!("check notetype: {}", ntid);
            let mut group = group.peekable();
            let mut nt = match self.get_notetype(ntid)? {
                None => {
                    let first_note = self.storage.get_note(group.peek().unwrap().1)?.unwrap();
                    out.notetypes_recovered += 1;
                    self.recover_notetype(stamp_millis, first_note.fields().len(), ntid)?
                }
                Some(nt) => nt,
            };

            self.add_missing_field_tags(Arc::make_mut(&mut nt))?;

            let mut genctx = None;
            for (_, nid) in group {
                progress.increment(|p| {
                    let DatabaseCheckProgress::Notes { current, .. } = p else {
                        unreachable!()
                    };
                    current
                })?;

                let mut note = self.get_note_fixing_invalid_utf8(nid, out)?;
                let original = note.clone();

                let cards = self.storage.existing_cards_for_note(nid)?;

                out.card_ords_duplicated += self.remove_duplicate_card_ordinals(&cards)?;
                out.templates_missing += self.remove_cards_without_template(&nt, &cards)?;

                // fix fields
                if note.fields().len() != nt.fields.len() {
                    note.fix_field_count(&nt);
                    note.tags.push("db-check".into());
                    out.field_count_mismatch += 1;
                }

                if note.mtime > stamp_secs {
                    note.mtime = stamp_secs;
                }

                // note type ID may have changed if we created a recovery notetype
                note.notetype_id = nt.id;

                // write note, updating tags and generating missing cards
                let ctx = genctx.get_or_insert_with(|| {
                    CardGenContext::new(
                        nt.as_ref(),
                        self.get_last_deck_added_to_for_notetype(nt.id),
                        usn,
                    )
                });
                self.update_note_inner_generating_cards(
                    ctx, &mut note, &original, false, norm, true,
                )?;
            }
        }

        // the note rebuilding process took care of adding tags back, so we just need
        // to restore the collapse state
        self.storage.restore_expanded_tags(&expanded_tags)?;

        // if the collection is empty and the user has deleted all note types, ensure at
        // least one note type exists
        if self.storage.get_all_notetype_names()?.is_empty() {
            let mut nt = all_stock_notetypes(&self.tr).remove(0);
            self.add_notetype_inner(&mut nt, usn, true)?;
        }

        if out.card_ords_duplicated > 0
            || out.field_count_mismatch > 0
            || out.templates_missing > 0
            || out.notetypes_recovered > 0
        {
            self.set_schema_modified()?;
        }

        Ok(())
    }
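
    /// Fetch a note, first repairing any invalid UTF-8 in it.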
    fn get_note_fixing_invalid_utf8(
        &self,
        nid: NoteId,
        out: &mut CheckDatabaseOutput,
    ) -> Result<Note> {
        match self.storage.get_note(nid) {
            Ok(note) => Ok(note.unwrap()),
            Err(err) => match err {
                AnkiError::DbError {
                    source:
                        DbError {
                            kind: DbErrorKind::Utf8,
                            ..
                        },
                } => {
                    // fix note then fetch again
                    self.storage.fix_invalid_utf8_in_note(nid)?;
                    out.invalid_utf8 += 1;
                    Ok(self.storage.get_note(nid)?.unwrap())
                }
                // other errors are unhandled
                _ => Err(err),
            },
        }
    }
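
    /// Remove cards that share a template ordinal with another card of the
    /// same note.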
    fn remove_duplicate_card_ordinals(
        &mut self,
        cards: &[AlreadyGeneratedCardInfo],
    ) -> Result<usize> {
        let mut ords = HashSet::new();
        let mut removed = 0;
        for card in cards {
            if !ords.insert(card.ord) {
                self.storage.remove_card(card.id)?;
                removed += 1;
            }
        }

        Ok(removed)
    }
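
    /// Remove cards whose ordinal exceeds the notetype's template count; cloze
    /// notetypes are skipped.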
    fn remove_cards_without_template(
        &mut self,
        nt: &Notetype,
        cards: &[AlreadyGeneratedCardInfo],
    ) -> Result<usize> {
        if nt.config.kind() == NotetypeKind::Cloze {
            return Ok(0);
        }
        let mut removed = 0;
        for card in cards {
            if card.ord as usize >= nt.templates.len() {
                self.storage.remove_card(card.id)?;
                removed += 1;
            }
        }

        Ok(removed)
    }
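
    /// Create a new "db-check" notetype with enough fields and templates to
    /// cover the notes and cards that referenced the missing notetype.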
    fn recover_notetype(
        &mut self,
        stamp: TimestampMillis,
        field_count: usize,
        previous_id: NotetypeId,
    ) -> Result<Arc<Notetype>> {
        debug!("create recovery notetype");
        let extra_cards_required = self
            .storage
            .highest_card_ordinal_for_notetype(previous_id)?;
        let mut basic = all_stock_notetypes(&self.tr).remove(0);
        let mut field = 3;
        while basic.fields.len() < field_count {
            basic.add_field(format!("{}", field));
            field += 1;
        }
        basic.name = format!("db-check-{}-{}", stamp, field_count);
        let qfmt = basic.templates[0].config.q_format.clone();
        let afmt = basic.templates[0].config.a_format.clone();
        for n in 0..extra_cards_required {
            basic.add_template(&format!("Card {}", n + 2), &qfmt, &afmt);
        }
        self.add_notetype(&mut basic, true)?;
        Ok(Arc::new(basic))
    }
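
    /// Fix review log entries with invalid properties.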
    fn check_revlog(&mut self, out: &mut CheckDatabaseOutput) -> Result<()> {
        let cnt = self.storage.fix_revlog_properties()?;
        if cnt > 0 {
            self.set_schema_modified()?;
            out.revlog_properties_invalid = cnt;
        }

        Ok(())
    }
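
    /// Add any missing parent decks implied by the existing deck names.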
    fn check_missing_deck_names(&mut self, out: &mut CheckDatabaseOutput) -> Result<()> {
        let names = self.storage.get_all_deck_names()?;
        out.decks_missing += self.add_missing_deck_names(&names)?;
        Ok(())
    }
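
    /// Recompute the next new-card position from the cards currently in the
    /// collection.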
    fn update_next_new_position(&mut self) -> Result<()> {
        let pos = self.storage.max_new_card_position().unwrap_or(0);
        self.set_next_card_position(pos)
    }
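
    /// Reassign item IDs that lie more than a day in the future, returning how
    /// many were changed.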
    pub(crate) fn maybe_fix_invalid_ids(&mut self) -> Result<usize> {
        let now = TimestampMillis::now();
        let tomorrow = now.adding_secs(24 * 60 * 60).0;
        let num_invalid_ids = self.storage.invalid_ids(tomorrow)?;
        if num_invalid_ids > 0 {
            self.storage.fix_invalid_ids(tomorrow, now.0)?;
            self.set_schema_modified()?;
        }
        Ok(num_invalid_ids)
    }
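
    /// Ensure image occlusion notetypes have their special field tags and
    /// deletion protection set; other notetypes are left alone.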
    fn add_missing_field_tags(&mut self, nt: &mut Notetype) -> Result<()> {
        // we only try to fix I/O, as the other notetypes have been in circulation too
        // long, and there's too much of a risk that the user has reordered the fields
        // already. We could try to match on field name in the future though.
        let usn = self.usn()?;
        if let OriginalStockKind::ImageOcclusion = nt.config.original_stock_kind() {
            let mut changed = false;
            if nt.fields.len() >= 5 {
                for i in 0..5 {
                    let conf = &mut nt.fields[i].config;
                    if !conf.prevent_deletion {
                        changed = true;
                        conf.prevent_deletion = i != ImageOcclusionField::Comments as usize;
                        conf.tag = Some(i as u32);
                    }
                }
            }
            if changed {
                nt.set_modified(usn);
                self.add_or_update_notetype_with_existing_id_inner(nt, None, usn, true)?;
            }
        }
        Ok(())
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::decks::DeckId;
    use crate::search::SortMode;

    #[test]
    fn cards() -> Result<()> {
        let mut col = Collection::new();
        let nt = col.get_notetype_by_name("Basic")?.unwrap();
        let mut note = nt.new_note();
        col.add_note(&mut note, DeckId(1))?;

        // card properties
        col.storage
            .db
            .execute_batch("update cards set ivl=1.5,due=2000000,odue=1.5")?;

        let out = col.check_database()?;
        assert_eq!(
            out,
            CheckDatabaseOutput {
                card_properties_invalid: 2,
                card_position_too_high: 1,
                ..Default::default()
            }
        );
        // should be idempotent
        assert_eq!(col.check_database()?, Default::default());

        // missing deck
        col.storage.db.execute_batch("update cards set did=123")?;

        let out = col.check_database()?;
        assert_eq!(
            out,
            CheckDatabaseOutput {
                decks_missing: 1,
                ..Default::default()
            }
        );
        assert_eq!(
            col.storage
                .get_deck(DeckId(123))?
                .unwrap()
                .name
                .as_native_str(),
            "recovered123"
        );

        // missing note
        col.storage.remove_note(note.id)?;
        let out = col.check_database()?;
        assert_eq!(
            out,
            CheckDatabaseOutput {
                cards_missing_note: 1,
                ..Default::default()
            }
        );
        assert_eq!(
            col.storage.db_scalar::<u32>("select count(*) from cards")?,
            0
        );

        Ok(())
    }

    #[test]
    fn revlog() -> Result<()> {
        let mut col = Collection::new();

        col.storage.db.execute_batch(
            "
        insert into revlog (id,cid,usn,ease,ivl,lastIvl,factor,time,type)
        values (0,0,0,0,1.5,1.5,0,0,0)",
        )?;

        let out = col.check_database()?;
        assert_eq!(
            out,
            CheckDatabaseOutput {
                revlog_properties_invalid: 1,
                ..Default::default()
            }
        );
        assert!(col
            .storage
            .db_scalar::<bool>("select ivl = lastIvl = 1 from revlog")?);

        Ok(())
    }
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn note_card_link() -> Result<()> {
|
2023-03-19 01:58:35 +01:00
|
|
|
let mut col = Collection::new();
|
2020-05-10 11:51:18 +02:00
|
|
|
let nt = col.get_notetype_by_name("Basic")?.unwrap();
|
|
|
|
let mut note = nt.new_note();
|
2021-03-27 10:53:33 +01:00
|
|
|
col.add_note(&mut note, DeckId(1))?;
|
2020-05-10 11:51:18 +02:00
|
|
|
|
2020-05-10 10:09:18 +02:00
|
|
|
// duplicate ordinals
|
|
|
|
let cid = col.search_cards("", SortMode::NoOrder)?[0];
|
|
|
|
let mut card = col.storage.get_card(cid)?.unwrap();
|
|
|
|
card.id.0 += 1;
|
|
|
|
col.storage.add_card(&mut card)?;
|
|
|
|
|
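
        // the card with the duplicated ordinal should be reported and removed,
        // leaving a single card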
        let out = col.check_database()?;
        assert_eq!(
            out,
            CheckDatabaseOutput {
                card_ords_duplicated: 1,
                ..Default::default()
            }
        );
        assert_eq!(
            col.storage.db_scalar::<u32>("select count(*) from cards")?,
            1
        );

        // missing templates
        let cid = col.search_cards("", SortMode::NoOrder)?[0];
        let mut card = col.storage.get_card(cid)?.unwrap();
        card.id.0 += 1;
        card.template_idx = 10;
        col.storage.add_card(&mut card)?;
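
        // the card pointing at a non-existent template should be reported and
        // removed, again leaving a single card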
        let out = col.check_database()?;
        assert_eq!(
            out,
            CheckDatabaseOutput {
                templates_missing: 1,
                ..Default::default()
            }
        );
        assert_eq!(
            col.storage.db_scalar::<u32>("select count(*) from cards")?,
            1
        );

        Ok(())
    }

    #[test]
    fn note_fields() -> Result<()> {
        let mut col = Collection::new();
        let nt = col.get_notetype_by_name("Basic")?.unwrap();
        let mut note = nt.new_note();
        col.add_note(&mut note, DeckId(1))?;

        // excess fields get joined into the last one
        col.storage
            .db
            .execute_batch("update notes set flds = 'a\x1fb\x1fc\x1fd'")?;
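
        // the extra field values should be reported as a mismatch and merged
        // into the note's last field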
        let out = col.check_database()?;
        assert_eq!(
            out,
            CheckDatabaseOutput {
                field_count_mismatch: 1,
                ..Default::default()
            }
        );
        let note = col.storage.get_note(note.id)?.unwrap();
        assert_eq!(&note.fields()[..], &["a", "b; c; d"]);

        // missing fields get filled with blanks
        col.storage
            .db
            .execute_batch("update notes set flds = 'a'")?;
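
        // the missing field should be reported and replaced with an empty string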
        let out = col.check_database()?;
        assert_eq!(
            out,
            CheckDatabaseOutput {
                field_count_mismatch: 1,
                ..Default::default()
            }
        );
        let note = col.storage.get_note(note.id)?.unwrap();
        assert_eq!(&note.fields()[..], &["a", ""]);

        Ok(())
    }

    #[test]
    fn deck_names() -> Result<()> {
        let mut col = Collection::new();

        let deck = col.get_or_create_normal_deck("foo::bar::baz")?;
        // includes default
        assert_eq!(col.storage.get_all_deck_names()?.len(), 4);

        col.storage
            .db
            .prepare("delete from decks where id != ? and id != 1")?
            .execute([deck.id])?;
        assert_eq!(col.storage.get_all_deck_names()?.len(), 2);
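
        // the deleted parent decks should be recreated by the check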
        let out = col.check_database()?;
        assert_eq!(
            out,
            CheckDatabaseOutput {
                decks_missing: 1, // only counts the immediate parent that was missing
                ..Default::default()
            }
        );
        assert_eq!(
            &col.storage
                .get_all_deck_names()?
                .iter()
                .map(|(_, name)| name)
                .collect::<Vec<_>>(),
            &["Default", "foo", "foo::bar", "foo::bar::baz"]
        );

        Ok(())
    }

    #[test]
    fn tags() -> Result<()> {
        let mut col = Collection::new();
        let nt = col.get_notetype_by_name("Basic")?.unwrap();
        let mut note = nt.new_note();
        note.tags.push("one".into());
        note.tags.push("two".into());
        col.add_note(&mut note, DeckId(1))?;

        col.set_tag_collapsed("one", false)?;
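
        // after the check, "one" should still be expanded, while "two" should not be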
        col.check_database()?;

        assert!(col.storage.get_tag("one")?.unwrap().expanded);
        assert!(!col.storage.get_tag("two")?.unwrap().expanded);

        Ok(())
    }
}