in/out -> request/response
The saved characters weren't worth the increased difficulty when reading, or the deviation from protobuf norms.
parent 42c71cd204
commit c79f8ba88f
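The rename is mechanical for callers: every message previously exposed as SomethingIn or SomethingOut becomes SomethingRequest or SomethingResponse, with identical fields. A minimal before/after sketch in Python (the field values are illustrative; the import alias is the one used in the diff below):

    import anki._backend.backend_pb2 as _pb  # generated protobuf module

    # before this commit
    req = _pb.TranslateStringIn(module_index=0, message_index=1)

    # after this commit: same fields, only the message name changes
    req = _pb.TranslateStringRequest(module_index=0, message_index=1)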
@@ -6,14 +6,14 @@ persistent = no
ignored-classes=
BrowserColumns,
BrowserRow,
FormatTimespanIn,
FormatTimespanRequest,
CardAnswer,
QueuedCards,
UnburyDeckIn,
BuryOrSuspendCardsIn,
NoteFieldsCheckOut,
UnburyDeckRequest,
BuryOrSuspendCardsRequest,
NoteFieldsCheckResponse,
BackendError,
SetDeckCollapsedIn,
SetDeckCollapsedRequest,

[REPORTS]
output-format=colorized

@@ -132,14 +132,14 @@ class RustBackend(RustBackendGenerated):

def translate_string_in(
module_index: int, message_index: int, **kwargs: Union[str, int, float]
) -> pb.TranslateStringIn:
) -> pb.TranslateStringRequest:
args = {}
for (k, v) in kwargs.items():
if isinstance(v, str):
args[k] = pb.TranslateArgValue(str=v)
else:
args[k] = pb.TranslateArgValue(number=v)
return pb.TranslateStringIn(
return pb.TranslateStringRequest(
module_index=module_index, message_index=message_index, args=args
)

@@ -107,7 +107,7 @@ def get_input_assign(msg):
def render_method(service_idx, method_idx, method):
input_name = method.input_type.name
if (
(input_name.endswith("In") or len(method.input_type.fields) < 2)
(input_name.endswith("Request") or len(method.input_type.fields) < 2)
and not method.input_type.oneofs
and not method.name in SKIP_UNROLL_INPUT
):

@@ -21,9 +21,9 @@ from anki.utils import from_json_bytes, ids2str, intTime, legacy_func, to_json_b
DeckTreeNode = _pb.DeckTreeNode
DeckNameId = _pb.DeckNameId
FilteredDeckConfig = _pb.Deck.Filtered
DeckCollapseScope = _pb.SetDeckCollapsedIn.Scope
DeckCollapseScope = _pb.SetDeckCollapsedRequest.Scope
DeckConfigsForUpdate = _pb.DeckConfigsForUpdate
UpdateDeckConfigs = _pb.UpdateDeckConfigsIn
UpdateDeckConfigs = _pb.UpdateDeckConfigsRequest

# legacy code may pass this in as the type argument to .id()
defaultDeck = 0

@@ -13,7 +13,7 @@ import anki._backend.backend_pb2 as _pb

# public exports
TR = anki._backend.LegacyTranslationEnum
FormatTimeSpan = _pb.FormatTimespanIn
FormatTimeSpan = _pb.FormatTimespanRequest

langs = sorted(

@@ -45,7 +45,7 @@ class ExtractedLatexOutput:
latex: List[ExtractedLatex]

@staticmethod
def from_proto(proto: _pb.ExtractLatexOut) -> ExtractedLatexOutput:
def from_proto(proto: _pb.ExtractLatexResponse) -> ExtractedLatexOutput:
return ExtractedLatexOutput(
html=proto.text,
latex=[

@@ -29,7 +29,7 @@ def media_paths_from_col_path(col_path: str) -> Tuple[str, str]:
return (media_folder, media_db)

CheckMediaOut = _pb.CheckMediaOut
CheckMediaResponse = _pb.CheckMediaResponse

# fixme: look into whether we can drop chdir() below
@@ -212,7 +212,7 @@ class MediaManager:
# Checking media
##########################################################################

def check(self) -> CheckMediaOut:
def check(self) -> CheckMediaResponse:
output = self.col._backend.check_media()
# files may have been renamed on disk, so an undo at this point could
# break file references

@@ -24,7 +24,7 @@ NotetypeNameId = _pb.NotetypeNameId
NotetypeNameIdUseCount = _pb.NotetypeNameIdUseCount
NotetypeNames = _pb.NotetypeNames
ChangeNotetypeInfo = _pb.ChangeNotetypeInfo
ChangeNotetypeIn = _pb.ChangeNotetypeIn
ChangeNotetypeRequest = _pb.ChangeNotetypeRequest

# legacy types
NotetypeDict = Dict[str, Any]
@@ -447,12 +447,12 @@ and notes.mid = ? and cards.ord = ?""",
old_notetype_id=old_notetype_id, new_notetype_id=new_notetype_id
)

def change_notetype_of_notes(self, input: ChangeNotetypeIn) -> OpChanges:
def change_notetype_of_notes(self, input: ChangeNotetypeRequest) -> OpChanges:
"""Assign a new notetype, optionally altering field/template order.

To get defaults, use

input = ChangeNotetypeIn()
input = ChangeNotetypeRequest()
input.ParseFromString(col.models.change_notetype_info(...))
input.note_ids.extend([...])

@@ -482,7 +482,7 @@ and notes.mid = ? and cards.ord = ?""",
template_map = self._convert_legacy_map(cmap, len(newModel["tmpls"]))

self.col._backend.change_notetype(
ChangeNotetypeIn(
ChangeNotetypeRequest(
note_ids=nids,
new_fields=field_map,
new_templates=template_map,

@@ -14,8 +14,8 @@ from anki.consts import MODEL_STD
from anki.models import NotetypeDict, NotetypeId, TemplateDict
from anki.utils import joinFields

DuplicateOrEmptyResult = _pb.NoteFieldsCheckOut.State
NoteFieldsCheckResult = _pb.NoteFieldsCheckOut.State
DuplicateOrEmptyResult = _pb.NoteFieldsCheckResponse.State
NoteFieldsCheckResult = _pb.NoteFieldsCheckResponse.State

# types
NoteId = NewType("NoteId", int)

@@ -8,7 +8,7 @@ import anki._backend.backend_pb2 as _pb
from anki.collection import OpChanges, OpChangesWithCount, OpChangesWithId
from anki.config import Config

SchedTimingToday = _pb.SchedTimingTodayOut
SchedTimingToday = _pb.SchedTimingTodayResponse

from typing import List, Optional, Sequence
@@ -19,9 +19,9 @@ from anki.decks import DeckConfigDict, DeckId, DeckTreeNode
from anki.notes import NoteId
from anki.utils import ids2str, intTime

CongratsInfo = _pb.CongratsInfoOut
UnburyDeck = _pb.UnburyDeckIn
BuryOrSuspend = _pb.BuryOrSuspendCardsIn
CongratsInfo = _pb.CongratsInfoResponse
UnburyDeck = _pb.UnburyDeckRequest
BuryOrSuspend = _pb.BuryOrSuspendCardsRequest
FilteredDeckForUpdate = _pb.FilteredDeckForUpdate

@@ -18,8 +18,8 @@ from anki.lang import FormatTimeSpan
from anki.scheduler.legacy import SchedulerBaseWithLegacy
from anki.utils import ids2str, intTime

CountsForDeckToday = _pb.CountsForDeckTodayOut
SchedTimingToday = _pb.SchedTimingTodayOut
CountsForDeckToday = _pb.CountsForDeckTodayResponse
SchedTimingToday = _pb.SchedTimingTodayResponse

# legacy type alias
QueueConfig = Dict[str, Any]

@@ -5,8 +5,8 @@ import anki._backend.backend_pb2 as _pb

# public exports
SyncAuth = _pb.SyncAuth
SyncOutput = _pb.SyncCollectionOut
SyncStatus = _pb.SyncStatusOut
SyncOutput = _pb.SyncCollectionResponse
SyncStatus = _pb.SyncStatusResponse

# Legacy attributes some add-ons may be using

@@ -27,9 +27,9 @@ except ImportError as e:
from flask import Response

from anki import Collection
from anki._backend.backend_pb2 import SyncServerMethodIn
from anki._backend.backend_pb2 import SyncServerMethodRequest

Method = SyncServerMethodIn.Method # pylint: disable=no-member
Method = SyncServerMethodRequest.Method # pylint: disable=no-member

app = flask.Flask(__name__)
col: Collection
@@ -116,7 +116,7 @@ def after_full_sync() -> None:

def get_method(
method_str: str,
) -> Optional[SyncServerMethodIn.Method.V]: # pylint: disable=no-member
) -> Optional[SyncServerMethodRequest.Method.V]: # pylint: disable=no-member
s = method_str
if s == "hostKey":
return Method.HOST_KEY

@@ -65,7 +65,7 @@ class PartiallyRenderedCard:
latex_svg: bool

@classmethod
def from_proto(cls, out: _pb.RenderCardOut) -> PartiallyRenderedCard:
def from_proto(cls, out: _pb.RenderCardResponse) -> PartiallyRenderedCard:
qnodes = cls.nodes_from_proto(out.question_nodes)
anodes = cls.nodes_from_proto(out.answer_nodes)

@@ -11,10 +11,10 @@ ignored-classes=
SearchNode,
Config,
OpChanges,
UnburyDeckIn,
UnburyDeckRequest,
CardAnswer,
QueuedCards,
ChangeNotetypeIn,
ChangeNotetypeRequest,

[REPORTS]
output-format=colorized

@@ -8,7 +8,7 @@ from typing import Sequence
import aqt
import aqt.deckconf
from anki.collection import OpChanges
from anki.models import ChangeNotetypeIn, NotetypeId
from anki.models import ChangeNotetypeRequest, NotetypeId
from anki.notes import NoteId
from aqt.operations.notetype import change_notetype_of_notes
from aqt.qt import *
@@ -70,7 +70,7 @@ class ChangeNotetypeDialog(QDialog):
QDialog.reject(self)

def save(self, data: bytes) -> None:
input = ChangeNotetypeIn()
input = ChangeNotetypeRequest()
input.ParseFromString(data)

if not self.mw.confirm_schema_modification():

@@ -11,7 +11,7 @@ from typing import Iterable, List, Optional, Sequence, TypeVar
import aqt
from anki.collection import SearchNode
from anki.errors import Interrupted
from anki.media import CheckMediaOut
from anki.media import CheckMediaResponse
from aqt.qt import *
from aqt.utils import (
askUser,
@@ -80,7 +80,7 @@ class MediaChecker:

self.mw.taskman.run_on_main(lambda: self.mw.progress.update(label=label))

def _check(self) -> CheckMediaOut:
def _check(self) -> CheckMediaResponse:
"Run the check on a background thread."
return self.mw.col.media.check()

@@ -93,7 +93,7 @@ class MediaChecker:
if isinstance(exc, Interrupted):
return

output: CheckMediaOut = future.result()
output: CheckMediaResponse = future.result()
report = output.report

# show report and offer to delete

@@ -4,7 +4,7 @@
from __future__ import annotations

from anki.collection import OpChanges, OpChangesWithId
from anki.models import ChangeNotetypeIn, NotetypeDict, NotetypeId
from anki.models import ChangeNotetypeRequest, NotetypeDict, NotetypeId
from aqt import QWidget
from aqt.operations import CollectionOp

@@ -34,6 +34,6 @@ def remove_notetype(

def change_notetype_of_notes(
*, parent: QWidget, input: ChangeNotetypeIn
*, parent: QWidget, input: ChangeNotetypeRequest
) -> CollectionOp[OpChanges]:
return CollectionOp(parent, lambda col: col.models.change_notetype_of_notes(input))

@@ -115,47 +115,48 @@ enum ServiceIndex {
}

service SchedulingService {
rpc SchedTimingToday(Empty) returns (SchedTimingTodayOut);
rpc SchedTimingToday(Empty) returns (SchedTimingTodayResponse);
rpc StudiedToday(Empty) returns (String);
rpc StudiedTodayMessage(StudiedTodayMessageIn) returns (String);
rpc UpdateStats(UpdateStatsIn) returns (Empty);
rpc ExtendLimits(ExtendLimitsIn) returns (Empty);
rpc CountsForDeckToday(DeckId) returns (CountsForDeckTodayOut);
rpc CongratsInfo(Empty) returns (CongratsInfoOut);
rpc StudiedTodayMessage(StudiedTodayMessageRequest) returns (String);
rpc UpdateStats(UpdateStatsRequest) returns (Empty);
rpc ExtendLimits(ExtendLimitsRequest) returns (Empty);
rpc CountsForDeckToday(DeckId) returns (CountsForDeckTodayResponse);
rpc CongratsInfo(Empty) returns (CongratsInfoResponse);
rpc RestoreBuriedAndSuspendedCards(CardIds) returns (OpChanges);
rpc UnburyDeck(UnburyDeckIn) returns (OpChanges);
rpc BuryOrSuspendCards(BuryOrSuspendCardsIn) returns (OpChangesWithCount);
rpc UnburyDeck(UnburyDeckRequest) returns (OpChanges);
rpc BuryOrSuspendCards(BuryOrSuspendCardsRequest)
returns (OpChangesWithCount);
rpc EmptyFilteredDeck(DeckId) returns (OpChanges);
rpc RebuildFilteredDeck(DeckId) returns (OpChangesWithCount);
rpc ScheduleCardsAsNew(ScheduleCardsAsNewIn) returns (OpChanges);
rpc SetDueDate(SetDueDateIn) returns (OpChanges);
rpc SortCards(SortCardsIn) returns (OpChangesWithCount);
rpc SortDeck(SortDeckIn) returns (OpChangesWithCount);
rpc ScheduleCardsAsNew(ScheduleCardsAsNewRequest) returns (OpChanges);
rpc SetDueDate(SetDueDateRequest) returns (OpChanges);
rpc SortCards(SortCardsRequest) returns (OpChangesWithCount);
rpc SortDeck(SortDeckRequest) returns (OpChangesWithCount);
rpc GetNextCardStates(CardId) returns (NextCardStates);
rpc DescribeNextStates(NextCardStates) returns (StringList);
rpc StateIsLeech(SchedulingState) returns (Bool);
rpc AnswerCard(CardAnswer) returns (OpChanges);
rpc UpgradeScheduler(Empty) returns (Empty);
rpc GetQueuedCards(GetQueuedCardsIn) returns (QueuedCards);
rpc GetQueuedCards(GetQueuedCardsRequest) returns (QueuedCards);
}

service DecksService {
rpc AddDeckLegacy(Json) returns (OpChangesWithId);
rpc AddOrUpdateDeckLegacy(AddOrUpdateDeckLegacyIn) returns (DeckId);
rpc DeckTree(DeckTreeIn) returns (DeckTreeNode);
rpc AddOrUpdateDeckLegacy(AddOrUpdateDeckLegacyRequest) returns (DeckId);
rpc DeckTree(DeckTreeRequest) returns (DeckTreeNode);
rpc DeckTreeLegacy(Empty) returns (Json);
rpc GetAllDecksLegacy(Empty) returns (Json);
rpc GetDeckIdByName(String) returns (DeckId);
rpc GetDeck(DeckId) returns (Deck);
rpc UpdateDeck(Deck) returns (OpChanges);
rpc UpdateDeckLegacy(Json) returns (OpChanges);
rpc SetDeckCollapsed(SetDeckCollapsedIn) returns (OpChanges);
rpc SetDeckCollapsed(SetDeckCollapsedRequest) returns (OpChanges);
rpc GetDeckLegacy(DeckId) returns (Json);
rpc GetDeckNames(GetDeckNamesIn) returns (DeckNames);
rpc GetDeckNames(GetDeckNamesRequest) returns (DeckNames);
rpc NewDeckLegacy(Bool) returns (Json);
rpc RemoveDecks(DeckIds) returns (OpChangesWithCount);
rpc ReparentDecks(ReparentDecksIn) returns (OpChangesWithCount);
rpc RenameDeck(RenameDeckIn) returns (OpChanges);
rpc ReparentDecks(ReparentDecksRequest) returns (OpChangesWithCount);
rpc RenameDeck(RenameDeckRequest) returns (OpChanges);
rpc GetOrCreateFilteredDeck(DeckId) returns (FilteredDeckForUpdate);
rpc AddOrUpdateFilteredDeck(FilteredDeckForUpdate) returns (OpChangesWithId);
rpc FilteredDeckOrderLabels(Empty) returns (StringList);
@@ -165,16 +166,17 @@ service DecksService {

service NotesService {
rpc NewNote(NotetypeId) returns (Note);
rpc AddNote(AddNoteIn) returns (AddNoteOut);
rpc DefaultsForAdding(DefaultsForAddingIn) returns (DeckAndNotetype);
rpc AddNote(AddNoteRequest) returns (AddNoteResponse);
rpc DefaultsForAdding(DefaultsForAddingRequest) returns (DeckAndNotetype);
rpc DefaultDeckForNotetype(NotetypeId) returns (DeckId);
rpc UpdateNote(UpdateNoteIn) returns (OpChanges);
rpc UpdateNote(UpdateNoteRequest) returns (OpChanges);
rpc GetNote(NoteId) returns (Note);
rpc RemoveNotes(RemoveNotesIn) returns (OpChangesWithCount);
rpc ClozeNumbersInNote(Note) returns (ClozeNumbersInNoteOut);
rpc AfterNoteUpdates(AfterNoteUpdatesIn) returns (OpChangesWithCount);
rpc FieldNamesForNotes(FieldNamesForNotesIn) returns (FieldNamesForNotesOut);
rpc NoteFieldsCheck(Note) returns (NoteFieldsCheckOut);
rpc RemoveNotes(RemoveNotesRequest) returns (OpChangesWithCount);
rpc ClozeNumbersInNote(Note) returns (ClozeNumbersInNoteResponse);
rpc AfterNoteUpdates(AfterNoteUpdatesRequest) returns (OpChangesWithCount);
rpc FieldNamesForNotes(FieldNamesForNotesRequest)
returns (FieldNamesForNotesResponse);
rpc NoteFieldsCheck(Note) returns (NoteFieldsCheckResponse);
rpc CardsOfNote(NoteId) returns (CardIds);
}

@@ -183,24 +185,24 @@ service SyncService {
rpc AbortSync(Empty) returns (Empty);
rpc AbortMediaSync(Empty) returns (Empty);
rpc BeforeUpload(Empty) returns (Empty);
rpc SyncLogin(SyncLoginIn) returns (SyncAuth);
rpc SyncStatus(SyncAuth) returns (SyncStatusOut);
rpc SyncCollection(SyncAuth) returns (SyncCollectionOut);
rpc SyncLogin(SyncLoginRequest) returns (SyncAuth);
rpc SyncStatus(SyncAuth) returns (SyncStatusResponse);
rpc SyncCollection(SyncAuth) returns (SyncCollectionResponse);
rpc FullUpload(SyncAuth) returns (Empty);
rpc FullDownload(SyncAuth) returns (Empty);
rpc SyncServerMethod(SyncServerMethodIn) returns (Json);
rpc SyncServerMethod(SyncServerMethodRequest) returns (Json);
}

service ConfigService {
rpc GetConfigJson(String) returns (Json);
rpc SetConfigJson(SetConfigJsonIn) returns (OpChanges);
rpc SetConfigJsonNoUndo(SetConfigJsonIn) returns (Empty);
rpc SetConfigJson(SetConfigJsonRequest) returns (OpChanges);
rpc SetConfigJsonNoUndo(SetConfigJsonRequest) returns (Empty);
rpc RemoveConfig(String) returns (OpChanges);
rpc GetAllConfig(Empty) returns (Json);
rpc GetConfigBool(Config.Bool) returns (Bool);
rpc SetConfigBool(SetConfigBoolIn) returns (OpChanges);
rpc SetConfigBool(SetConfigBoolRequest) returns (OpChanges);
rpc GetConfigString(Config.String) returns (String);
rpc SetConfigString(SetConfigStringIn) returns (OpChanges);
rpc SetConfigString(SetConfigStringRequest) returns (OpChanges);
rpc GetPreferences(Empty) returns (Preferences);
rpc SetPreferences(Preferences) returns (OpChanges);
}
@@ -210,7 +212,7 @@ service NotetypesService {
rpc UpdateNotetype(Notetype) returns (OpChanges);
rpc AddNotetypeLegacy(Json) returns (OpChangesWithId);
rpc UpdateNotetypeLegacy(Json) returns (OpChanges);
rpc AddOrUpdateNotetype(AddOrUpdateNotetypeIn) returns (NotetypeId);
rpc AddOrUpdateNotetype(AddOrUpdateNotetypeRequest) returns (NotetypeId);
rpc GetStockNotetypeLegacy(StockNotetype) returns (Json);
rpc GetNotetype(NotetypeId) returns (Notetype);
rpc GetNotetypeLegacy(NotetypeId) returns (Json);
@@ -218,24 +220,26 @@ service NotetypesService {
rpc GetNotetypeNamesAndCounts(Empty) returns (NotetypeUseCounts);
rpc GetNotetypeIdByName(String) returns (NotetypeId);
rpc RemoveNotetype(NotetypeId) returns (OpChanges);
rpc GetAuxNotetypeConfigKey(GetAuxConfigKeyIn) returns (String);
rpc GetAuxTemplateConfigKey(GetAuxTemplateConfigKeyIn) returns (String);
rpc GetAuxNotetypeConfigKey(GetAuxConfigKeyRequest) returns (String);
rpc GetAuxTemplateConfigKey(GetAuxTemplateConfigKeyRequest) returns (String);
rpc GetSingleNotetypeOfNotes(NoteIds) returns (NotetypeId);
rpc GetChangeNotetypeInfo(GetChangeNotetypeInfoIn)
rpc GetChangeNotetypeInfo(GetChangeNotetypeInfoRequest)
returns (ChangeNotetypeInfo);
rpc ChangeNotetype(ChangeNotetypeIn) returns (OpChanges);
rpc ChangeNotetype(ChangeNotetypeRequest) returns (OpChanges);
}

service CardRenderingService {
rpc ExtractAVTags(ExtractAVTagsIn) returns (ExtractAVTagsOut);
rpc ExtractLatex(ExtractLatexIn) returns (ExtractLatexOut);
rpc ExtractAVTags(ExtractAVTagsRequest) returns (ExtractAVTagsResponse);
rpc ExtractLatex(ExtractLatexRequest) returns (ExtractLatexResponse);
rpc GetEmptyCards(Empty) returns (EmptyCardsReport);
rpc RenderExistingCard(RenderExistingCardIn) returns (RenderCardOut);
rpc RenderUncommittedCard(RenderUncommittedCardIn) returns (RenderCardOut);
rpc RenderUncommittedCardLegacy(RenderUncommittedCardLegacyIn)
returns (RenderCardOut);
rpc RenderExistingCard(RenderExistingCardRequest)
returns (RenderCardResponse);
rpc RenderUncommittedCard(RenderUncommittedCardRequest)
returns (RenderCardResponse);
rpc RenderUncommittedCardLegacy(RenderUncommittedCardLegacyRequest)
returns (RenderCardResponse);
rpc StripAVTags(String) returns (String);
rpc RenderMarkdown(RenderMarkdownIn) returns (String);
rpc RenderMarkdown(RenderMarkdownRequest) returns (String);
}

service DeckConfigService {
@@ -246,29 +250,29 @@ service DeckConfigService {
rpc NewDeckConfigLegacy(Empty) returns (Json);
rpc RemoveDeckConfig(DeckConfigId) returns (Empty);
rpc GetDeckConfigsForUpdate(DeckId) returns (DeckConfigsForUpdate);
rpc UpdateDeckConfigs(UpdateDeckConfigsIn) returns (OpChanges);
rpc UpdateDeckConfigs(UpdateDeckConfigsRequest) returns (OpChanges);
}

service TagsService {
rpc ClearUnusedTags(Empty) returns (OpChangesWithCount);
rpc AllTags(Empty) returns (StringList);
rpc RemoveTags(String) returns (OpChangesWithCount);
rpc SetTagCollapsed(SetTagCollapsedIn) returns (OpChanges);
rpc SetTagCollapsed(SetTagCollapsedRequest) returns (OpChanges);
rpc TagTree(Empty) returns (TagTreeNode);
rpc ReparentTags(ReparentTagsIn) returns (OpChangesWithCount);
rpc RenameTags(RenameTagsIn) returns (OpChangesWithCount);
rpc AddNoteTags(NoteIdsAndTagsIn) returns (OpChangesWithCount);
rpc RemoveNoteTags(NoteIdsAndTagsIn) returns (OpChangesWithCount);
rpc FindAndReplaceTag(FindAndReplaceTagIn) returns (OpChangesWithCount);
rpc ReparentTags(ReparentTagsRequest) returns (OpChangesWithCount);
rpc RenameTags(RenameTagsRequest) returns (OpChangesWithCount);
rpc AddNoteTags(NoteIdsAndTagsRequest) returns (OpChangesWithCount);
rpc RemoveNoteTags(NoteIdsAndTagsRequest) returns (OpChangesWithCount);
rpc FindAndReplaceTag(FindAndReplaceTagRequest) returns (OpChangesWithCount);
}

service SearchService {
rpc BuildSearchString(SearchNode) returns (String);
rpc SearchCards(SearchIn) returns (SearchOut);
rpc SearchNotes(SearchIn) returns (SearchOut);
rpc JoinSearchNodes(JoinSearchNodesIn) returns (String);
rpc ReplaceSearchNode(ReplaceSearchNodeIn) returns (String);
rpc FindAndReplace(FindAndReplaceIn) returns (OpChangesWithCount);
rpc SearchCards(SearchRequest) returns (SearchResponse);
rpc SearchNotes(SearchRequest) returns (SearchResponse);
rpc JoinSearchNodes(JoinSearchNodesRequest) returns (String);
rpc ReplaceSearchNode(ReplaceSearchNodeRequest) returns (String);
rpc FindAndReplace(FindAndReplaceRequest) returns (OpChangesWithCount);
rpc AllBrowserColumns(Empty) returns (BrowserColumns);
rpc BrowserRowForId(Int64) returns (BrowserRow);
rpc SetActiveBrowserColumns(StringList) returns (Empty);
@@ -276,29 +280,29 @@ service SearchService {

service StatsService {
rpc CardStats(CardId) returns (String);
rpc Graphs(GraphsIn) returns (GraphsOut);
rpc Graphs(GraphsRequest) returns (GraphsResponse);
rpc GetGraphPreferences(Empty) returns (GraphPreferences);
rpc SetGraphPreferences(GraphPreferences) returns (Empty);
}

service MediaService {
rpc CheckMedia(Empty) returns (CheckMediaOut);
rpc TrashMediaFiles(TrashMediaFilesIn) returns (Empty);
rpc AddMediaFile(AddMediaFileIn) returns (String);
rpc CheckMedia(Empty) returns (CheckMediaResponse);
rpc TrashMediaFiles(TrashMediaFilesRequest) returns (Empty);
rpc AddMediaFile(AddMediaFileRequest) returns (String);
rpc EmptyTrash(Empty) returns (Empty);
rpc RestoreTrash(Empty) returns (Empty);
}

service I18nService {
rpc TranslateString(TranslateStringIn) returns (String);
rpc FormatTimespan(FormatTimespanIn) returns (String);
rpc I18nResources(I18nResourcesIn) returns (Json);
rpc TranslateString(TranslateStringRequest) returns (String);
rpc FormatTimespan(FormatTimespanRequest) returns (String);
rpc I18nResources(I18nResourcesRequest) returns (Json);
}

service CollectionService {
rpc OpenCollection(OpenCollectionIn) returns (Empty);
rpc CloseCollection(CloseCollectionIn) returns (Empty);
rpc CheckDatabase(Empty) returns (CheckDatabaseOut);
rpc OpenCollection(OpenCollectionRequest) returns (Empty);
rpc CloseCollection(CloseCollectionRequest) returns (Empty);
rpc CheckDatabase(Empty) returns (CheckDatabaseResponse);
rpc GetUndoStatus(Empty) returns (UndoStatus);
rpc Undo(Empty) returns (OpChangesAfterUndo);
rpc Redo(Empty) returns (OpChangesAfterUndo);
@@ -310,10 +314,10 @@ service CollectionService {

service CardsService {
rpc GetCard(CardId) returns (Card);
rpc UpdateCard(UpdateCardIn) returns (OpChanges);
rpc RemoveCards(RemoveCardsIn) returns (Empty);
rpc SetDeck(SetDeckIn) returns (OpChangesWithCount);
rpc SetFlag(SetFlagIn) returns (OpChangesWithCount);
rpc UpdateCard(UpdateCardRequest) returns (OpChanges);
rpc RemoveCards(RemoveCardsRequest) returns (Empty);
rpc SetDeck(SetDeckRequest) returns (OpChangesWithCount);
rpc SetFlag(SetFlagRequest) returns (OpChangesWithCount);
}

// Protobuf stored in .anki2 files
@@ -669,12 +673,12 @@ message Progress {
// Messages
///////////////////////////////////////////////////////////

message SchedTimingTodayOut {
message SchedTimingTodayResponse {
uint32 days_elapsed = 1;
int64 next_day_at = 2;
}

message DeckTreeIn {
message DeckTreeRequest {
// if non-zero, counts for the provided timestamp will be included
int64 now = 1;
int64 top_deck_id = 2;
@@ -697,26 +701,26 @@ message DeckTreeNode {
repeated DeckTreeNode children = 3;
}

message RenderExistingCardIn {
message RenderExistingCardRequest {
int64 card_id = 1;
bool browser = 2;
}

message RenderUncommittedCardIn {
message RenderUncommittedCardRequest {
Note note = 1;
uint32 card_ord = 2;
Notetype.Template template = 3;
bool fill_empty = 4;
}

message RenderUncommittedCardLegacyIn {
message RenderUncommittedCardLegacyRequest {
Note note = 1;
uint32 card_ord = 2;
bytes template = 3;
bool fill_empty = 4;
}

message RenderCardOut {
message RenderCardResponse {
repeated RenderedTemplateNode question_nodes = 1;
repeated RenderedTemplateNode answer_nodes = 2;
string css = 3;
@@ -736,12 +740,12 @@ message RenderedTemplateReplacement {
repeated string filters = 3;
}

message ExtractAVTagsIn {
message ExtractAVTagsRequest {
string text = 1;
bool question_side = 2;
}

message ExtractAVTagsOut {
message ExtractAVTagsResponse {
string text = 1;
repeated AVTag av_tags = 2;
}
@@ -761,13 +765,13 @@ message TTSTag {
repeated string other_args = 5;
}

message ExtractLatexIn {
message ExtractLatexRequest {
string text = 1;
bool svg = 2;
bool expand_clozes = 3;
}

message ExtractLatexOut {
message ExtractLatexResponse {
string text = 1;
repeated ExtractedLatex latex = 2;
}
@@ -777,23 +781,23 @@ message ExtractedLatex {
string latex_body = 2;
}

message AddMediaFileIn {
message AddMediaFileRequest {
string desired_name = 1;
bytes data = 2;
}

message CheckMediaOut {
message CheckMediaResponse {
repeated string unused = 1;
repeated string missing = 2;
string report = 3;
bool have_trash = 4;
}

message TrashMediaFilesIn {
message TrashMediaFilesRequest {
repeated string fnames = 1;
}

message TranslateStringIn {
message TranslateStringRequest {
uint32 module_index = 1;
uint32 message_index = 2;
map<string, TranslateArgValue> args = 3;
@@ -806,7 +810,7 @@ message TranslateArgValue {
}
}

message FormatTimespanIn {
message FormatTimespanRequest {
enum Context {
PRECISE = 0;
ANSWER_BUTTONS = 1;
@@ -817,33 +821,33 @@ message FormatTimespanIn {
Context context = 2;
}

message I18nResourcesIn {
message I18nResourcesRequest {
repeated string modules = 1;
}

message StudiedTodayMessageIn {
message StudiedTodayMessageRequest {
uint32 cards = 1;
double seconds = 2;
}

message CongratsLearnMessageIn {
message CongratsLearnMessageRequest {
float next_due = 1;
uint32 remaining = 2;
}

message OpenCollectionIn {
message OpenCollectionRequest {
string collection_path = 1;
string media_folder_path = 2;
string media_db_path = 3;
string log_path = 4;
}

message SearchIn {
message SearchRequest {
string search = 1;
SortOrder order = 2;
}

message SearchOut {
message SearchResponse {
repeated int64 ids = 1;
}

@@ -929,18 +933,18 @@ message SearchNode {
}
}

message JoinSearchNodesIn {
message JoinSearchNodesRequest {
SearchNode.Group.Joiner joiner = 1;
SearchNode existing_node = 2;
SearchNode additional_node = 3;
}

message ReplaceSearchNodeIn {
message ReplaceSearchNodeRequest {
SearchNode existing_node = 1;
SearchNode replacement_node = 2;
}

message CloseCollectionIn {
message CloseCollectionRequest {
bool downgrade_to_schema11 = 1;
}

@@ -963,7 +967,7 @@ message DeckConfigsForUpdate {
bool have_addons = 6;
}

message UpdateDeckConfigsIn {
message UpdateDeckConfigsRequest {
int64 target_deck_id = 1;
/// Unchanged, non-selected configs can be omitted. Deck will
/// be set to whichever entry comes last.
@@ -972,12 +976,12 @@ message UpdateDeckConfigsIn {
bool apply_to_children = 4;
}

message SetTagCollapsedIn {
message SetTagCollapsedRequest {
string name = 1;
bool collapsed = 2;
}

message SetDeckCollapsedIn {
message SetDeckCollapsedRequest {
enum Scope {
REVIEWER = 0;
BROWSER = 1;
@@ -988,7 +992,7 @@ message SetDeckCollapsedIn {
Scope scope = 3;
}

message GetChangedTagsOut {
message GetChangedTagsResponse {
repeated string tags = 1;
}

@@ -999,17 +1003,17 @@ message TagTreeNode {
bool collapsed = 4;
}

message ReparentTagsIn {
message ReparentTagsRequest {
repeated string tags = 1;
string new_parent = 2;
}

message RenameTagsIn {
message RenameTagsRequest {
string current_prefix = 1;
string new_prefix = 2;
}

message SetConfigJsonIn {
message SetConfigJsonRequest {
string key = 1;
bytes value_json = 2;
bool undoable = 3;
@@ -1046,27 +1050,27 @@ message NotetypeNameIdUseCount {
uint32 use_count = 3;
}

message AddOrUpdateNotetypeIn {
message AddOrUpdateNotetypeRequest {
bytes json = 1;
bool preserve_usn_and_mtime = 2;
}

message AddNoteIn {
message AddNoteRequest {
Note note = 1;
int64 deck_id = 2;
}

message AddNoteOut {
message AddNoteResponse {
int64 note_id = 1;
OpChanges changes = 2;
}

message UpdateNoteIn {
message UpdateNoteRequest {
Note note = 1;
bool skip_undo_entry = 2;
}

message UpdateCardIn {
message UpdateCardRequest {
Card card = 1;
bool skip_undo_entry = 2;
}
@@ -1090,20 +1094,20 @@ message DeckNameId {
string name = 2;
}

message AddOrUpdateDeckLegacyIn {
message AddOrUpdateDeckLegacyRequest {
bytes deck = 1;
bool preserve_usn_and_mtime = 2;
}

message FieldNamesForNotesIn {
message FieldNamesForNotesRequest {
repeated int64 nids = 1;
}

message FieldNamesForNotesOut {
message FieldNamesForNotesResponse {
repeated string fields = 1;
}

message FindAndReplaceIn {
message FindAndReplaceRequest {
repeated int64 nids = 1;
string search = 2;
string replacement = 3;
@@ -1156,18 +1160,18 @@ message BrowserRow {
uint32 font_size = 4;
}

message AfterNoteUpdatesIn {
message AfterNoteUpdatesRequest {
repeated int64 nids = 1;
bool mark_notes_modified = 2;
bool generate_cards = 3;
}

message NoteIdsAndTagsIn {
message NoteIdsAndTagsRequest {
repeated int64 note_ids = 1;
string tags = 2;
}

message FindAndReplaceTagIn {
message FindAndReplaceTagRequest {
repeated int64 note_ids = 1;
string search = 2;
string replacement = 3;
@@ -1175,7 +1179,7 @@ message FindAndReplaceTagIn {
bool match_case = 5;
}

message CheckDatabaseOut {
message CheckDatabaseResponse {
repeated string problems = 1;
}

@@ -1216,22 +1220,22 @@ message Preferences {
Editing editing = 3;
}

message ClozeNumbersInNoteOut {
message ClozeNumbersInNoteResponse {
repeated uint32 numbers = 1;
}

message GetDeckNamesIn {
message GetDeckNamesRequest {
bool skip_empty_default = 1;
// if unset, implies skip_empty_default
bool include_filtered = 2;
}

message ReparentDecksIn {
message ReparentDecksRequest {
repeated int64 deck_ids = 1;
int64 new_parent = 2;
}

message NoteFieldsCheckOut {
message NoteFieldsCheckResponse {
enum State {
NORMAL = 0;
EMPTY = 1;
@@ -1243,12 +1247,12 @@ message NoteFieldsCheckOut {
State state = 1;
}

message SyncLoginIn {
message SyncLoginRequest {
string username = 1;
string password = 2;
}

message SyncStatusOut {
message SyncStatusResponse {
enum Required {
NO_CHANGES = 0;
NORMAL_SYNC = 1;
@@ -1257,7 +1261,7 @@ message SyncStatusOut {
Required required = 1;
}

message SyncCollectionOut {
message SyncCollectionResponse {
enum ChangesRequired {
NO_CHANGES = 0;
NORMAL_SYNC = 1;
@@ -1278,7 +1282,7 @@ message SyncAuth {
uint32 host_number = 2;
}

message SyncServerMethodIn {
message SyncServerMethodRequest {
enum Method {
HOST_KEY = 0;
META = 1;
@@ -1298,39 +1302,39 @@ message SyncServerMethodIn {
bytes data = 2;
}

message RemoveNotesIn {
message RemoveNotesRequest {
repeated int64 note_ids = 1;
repeated int64 card_ids = 2;
}

message RemoveCardsIn {
message RemoveCardsRequest {
repeated int64 card_ids = 1;
}

message UpdateStatsIn {
message UpdateStatsRequest {
int64 deck_id = 1;
int32 new_delta = 2;
int32 review_delta = 4;
int32 millisecond_delta = 5;
}

message ExtendLimitsIn {
message ExtendLimitsRequest {
int64 deck_id = 1;
int32 new_delta = 2;
int32 review_delta = 3;
}

message CountsForDeckTodayOut {
message CountsForDeckTodayResponse {
int32 new = 1;
int32 review = 2;
}

message GraphsIn {
message GraphsRequest {
string search = 1;
uint32 days = 2;
}

message GraphsOut {
message GraphsResponse {
repeated Card cards = 1;
repeated RevlogEntry revlog = 2;
uint32 days_elapsed = 3;
@@ -1373,7 +1377,7 @@ message RevlogEntry {
ReviewKind review_kind = 9;
}

message CongratsInfoOut {
message CongratsInfoResponse {
uint32 learn_remaining = 1;
uint32 secs_until_next_learn = 2;
bool review_remaining = 3;
@@ -1385,7 +1389,7 @@ message CongratsInfoOut {
string deck_description = 9;
}

message UnburyDeckIn {
message UnburyDeckRequest {
enum Mode {
ALL = 0;
SCHED_ONLY = 1;
@@ -1395,7 +1399,7 @@ message UnburyDeckIn {
Mode mode = 2;
}

message BuryOrSuspendCardsIn {
message BuryOrSuspendCardsRequest {
enum Mode {
SUSPEND = 0;
BURY_SCHED = 1;
@@ -1406,18 +1410,18 @@ message BuryOrSuspendCardsIn {
Mode mode = 3;
}

message ScheduleCardsAsNewIn {
message ScheduleCardsAsNewRequest {
repeated int64 card_ids = 1;
bool log = 2;
}

message SetDueDateIn {
message SetDueDateRequest {
repeated int64 card_ids = 1;
string days = 2;
Config.String config_key = 3;
}

message SortCardsIn {
message SortCardsRequest {
repeated int64 card_ids = 1;
uint32 starting_from = 2;
uint32 step_size = 3;
@@ -1425,12 +1429,12 @@ message SortCardsIn {
bool shift_existing = 5;
}

message SortDeckIn {
message SortDeckRequest {
int64 deck_id = 1;
bool randomize = 2;
}

message SetDeckIn {
message SetDeckRequest {
repeated int64 card_ids = 1;
int64 deck_id = 2;
}
@@ -1469,19 +1473,19 @@ message Config {
}
}

message SetConfigBoolIn {
message SetConfigBoolRequest {
Config.Bool.Key key = 1;
bool value = 2;
bool undoable = 3;
}

message SetConfigStringIn {
message SetConfigStringRequest {
Config.String.Key key = 1;
string value = 2;
bool undoable = 3;
}

message RenderMarkdownIn {
message RenderMarkdownRequest {
string markdown = 1;
bool sanitize = 2;
}
@@ -1557,7 +1561,7 @@ message CardAnswer {
uint32 milliseconds_taken = 6;
}

message GetQueuedCardsIn {
message GetQueuedCardsRequest {
uint32 fetch_limit = 1;
bool intraday_learning_only = 2;
}
@@ -1612,7 +1616,7 @@ message OpChangesAfterUndo {
uint32 counter = 5;
}

message DefaultsForAddingIn {
message DefaultsForAddingRequest {
int64 home_deck_of_current_review_card = 1;
}

@@ -1621,7 +1625,7 @@ message DeckAndNotetype {
int64 notetype_id = 2;
}

message RenameDeckIn {
message RenameDeckRequest {
int64 deck_id = 1;
string new_name = 2;
}
@@ -1632,28 +1636,28 @@ message FilteredDeckForUpdate {
Deck.Filtered config = 3;
}

message SetFlagIn {
message SetFlagRequest {
repeated int64 card_ids = 1;
uint32 flag = 2;
}

message GetAuxConfigKeyIn {
message GetAuxConfigKeyRequest {
int64 id = 1;
string key = 2;
}

message GetAuxTemplateConfigKeyIn {
message GetAuxTemplateConfigKeyRequest {
int64 notetype_id = 1;
uint32 card_ordinal = 2;
string key = 3;
}

message GetChangeNotetypeInfoIn {
message GetChangeNotetypeInfoRequest {
int64 old_notetype_id = 1;
int64 new_notetype_id = 2;
}

message ChangeNotetypeIn {
message ChangeNotetypeRequest {
repeated int64 note_ids = 1;
// -1 is used to represent null, as nullable repeated fields
// are unwieldy in protobuf
@@ -1669,5 +1673,5 @@ message ChangeNotetypeInfo {
repeated string old_template_names = 2;
repeated string new_field_names = 3;
repeated string new_template_names = 4;
ChangeNotetypeIn input = 5;
ChangeNotetypeRequest input = 5;
}

@@ -21,7 +21,7 @@ impl CardsService for Backend {
})
}

fn update_card(&self, input: pb::UpdateCardIn) -> Result<pb::OpChanges> {
fn update_card(&self, input: pb::UpdateCardRequest) -> Result<pb::OpChanges> {
self.with_col(|col| {
let mut card: Card = input.card.ok_or(AnkiError::NotFound)?.try_into()?;
col.update_card_maybe_undoable(&mut card, !input.skip_undo_entry)
@@ -29,7 +29,7 @@ impl CardsService for Backend {
.map(Into::into)
}

fn remove_cards(&self, input: pb::RemoveCardsIn) -> Result<pb::Empty> {
fn remove_cards(&self, input: pb::RemoveCardsRequest) -> Result<pb::Empty> {
self.with_col(|col| {
col.transact_no_undo(|col| {
col.remove_cards_and_orphaned_notes(
@@ -44,13 +44,13 @@ impl CardsService for Backend {
})
}

fn set_deck(&self, input: pb::SetDeckIn) -> Result<pb::OpChangesWithCount> {
fn set_deck(&self, input: pb::SetDeckRequest) -> Result<pb::OpChangesWithCount> {
let cids: Vec<_> = input.card_ids.into_iter().map(CardId).collect();
let deck_id = input.deck_id.into();
self.with_col(|col| col.set_deck(&cids, deck_id).map(Into::into))
}

fn set_flag(&self, input: pb::SetFlagIn) -> Result<pb::OpChangesWithCount> {
fn set_flag(&self, input: pb::SetFlagRequest) -> Result<pb::OpChangesWithCount> {
self.with_col(|col| {
col.set_card_flag(&to_card_ids(input.card_ids), input.flag)
.map(Into::into)

@@ -14,7 +14,10 @@ use crate::{
};

impl CardRenderingService for Backend {
fn extract_av_tags(&self, input: pb::ExtractAvTagsIn) -> Result<pb::ExtractAvTagsOut> {
fn extract_av_tags(
&self,
input: pb::ExtractAvTagsRequest,
) -> Result<pb::ExtractAvTagsResponse> {
let (text, tags) = extract_av_tags(&input.text, input.question_side);
let pt_tags = tags
.into_iter()
@@ -40,13 +43,13 @@ impl CardRenderingService for Backend {
})
.collect();

Ok(pb::ExtractAvTagsOut {
Ok(pb::ExtractAvTagsResponse {
text: text.into(),
av_tags: pt_tags,
})
}

fn extract_latex(&self, input: pb::ExtractLatexIn) -> Result<pb::ExtractLatexOut> {
fn extract_latex(&self, input: pb::ExtractLatexRequest) -> Result<pb::ExtractLatexResponse> {
let func = if input.expand_clozes {
extract_latex_expanding_clozes
} else {
@@ -54,7 +57,7 @@ impl CardRenderingService for Backend {
};
let (text, extracted) = func(&input.text, input.svg);

Ok(pb::ExtractLatexOut {
Ok(pb::ExtractLatexResponse {
text,
latex: extracted
.into_iter()
@@ -88,7 +91,10 @@ impl CardRenderingService for Backend {
})
}

fn render_existing_card(&self, input: pb::RenderExistingCardIn) -> Result<pb::RenderCardOut> {
fn render_existing_card(
&self,
input: pb::RenderExistingCardRequest,
) -> Result<pb::RenderCardResponse> {
self.with_col(|col| {
col.render_existing_card(CardId(input.card_id), input.browser)
.map(Into::into)
@@ -97,8 +103,8 @@ impl CardRenderingService for Backend {

fn render_uncommitted_card(
&self,
input: pb::RenderUncommittedCardIn,
) -> Result<pb::RenderCardOut> {
input: pb::RenderUncommittedCardRequest,
) -> Result<pb::RenderCardResponse> {
let template = input.template.ok_or(AnkiError::NotFound)?.into();
let mut note = input
.note
@@ -114,8 +120,8 @@ impl CardRenderingService for Backend {

fn render_uncommitted_card_legacy(
&self,
input: pb::RenderUncommittedCardLegacyIn,
) -> Result<pb::RenderCardOut> {
input: pb::RenderUncommittedCardLegacyRequest,
) -> Result<pb::RenderCardResponse> {
let schema11: CardTemplateSchema11 = serde_json::from_slice(&input.template)?;
let template = schema11.into();
let mut note = input
@@ -136,7 +142,7 @@ impl CardRenderingService for Backend {
})
}

fn render_markdown(&self, input: pb::RenderMarkdownIn) -> Result<pb::String> {
fn render_markdown(&self, input: pb::RenderMarkdownRequest) -> Result<pb::String> {
let mut text = render_markdown(&input.markdown);
if input.sanitize {
// currently no images
@@ -170,9 +176,9 @@ fn rendered_node_to_proto(node: RenderedNode) -> pb::rendered_template_node::Val
}
}

impl From<RenderCardOutput> for pb::RenderCardOut {
impl From<RenderCardOutput> for pb::RenderCardResponse {
fn from(o: RenderCardOutput) -> Self {
pb::RenderCardOut {
pb::RenderCardResponse {
question_nodes: rendered_nodes_to_proto(o.qnodes),
answer_nodes: rendered_nodes_to_proto(o.anodes),
css: o.css,

@@ -24,7 +24,7 @@ impl CollectionService for Backend {
Ok(().into())
}

fn open_collection(&self, input: pb::OpenCollectionIn) -> Result<pb::Empty> {
fn open_collection(&self, input: pb::OpenCollectionRequest) -> Result<pb::Empty> {
let mut col = self.col.lock().unwrap();
if col.is_some() {
return Err(AnkiError::CollectionAlreadyOpen);
@@ -53,7 +53,7 @@ impl CollectionService for Backend {
Ok(().into())
}

fn close_collection(&self, input: pb::CloseCollectionIn) -> Result<pb::Empty> {
fn close_collection(&self, input: pb::CloseCollectionRequest) -> Result<pb::Empty> {
self.abort_media_sync_and_wait();

let mut col = self.col.lock().unwrap();
@@ -72,14 +72,14 @@ impl CollectionService for Backend {
Ok(().into())
}

fn check_database(&self, _input: pb::Empty) -> Result<pb::CheckDatabaseOut> {
fn check_database(&self, _input: pb::Empty) -> Result<pb::CheckDatabaseResponse> {
let mut handler = self.new_progress_handler();
let progress_fn = move |progress, throttle| {
handler.update(Progress::DatabaseCheck(progress), throttle);
};
self.with_col(|col| {
col.check_database(progress_fn)
.map(|problems| pb::CheckDatabaseOut {
.map(|problems| pb::CheckDatabaseResponse {
problems: problems.to_i18n_strings(&col.tr),
})
})

@@ -62,7 +62,7 @@ impl ConfigService for Backend {
})
}

fn set_config_json(&self, input: pb::SetConfigJsonIn) -> Result<pb::OpChanges> {
fn set_config_json(&self, input: pb::SetConfigJsonRequest) -> Result<pb::OpChanges> {
self.with_col(|col| {
let val: Value = serde_json::from_slice(&input.value_json)?;
col.set_config_json(input.key.as_str(), &val, input.undoable)
@@ -70,7 +70,7 @@ impl ConfigService for Backend {
.map(Into::into)
}

fn set_config_json_no_undo(&self, input: pb::SetConfigJsonIn) -> Result<pb::Empty> {
fn set_config_json_no_undo(&self, input: pb::SetConfigJsonRequest) -> Result<pb::Empty> {
self.with_col(|col| {
let val: Value = serde_json::from_slice(&input.value_json)?;
col.transact_no_undo(|col| col.set_config(input.key.as_str(), &val).map(|_| ()))
@@ -99,7 +99,7 @@ impl ConfigService for Backend {
})
}

fn set_config_bool(&self, input: pb::SetConfigBoolIn) -> Result<pb::OpChanges> {
fn set_config_bool(&self, input: pb::SetConfigBoolRequest) -> Result<pb::OpChanges> {
self.with_col(|col| col.set_config_bool(input.key().into(), input.value, input.undoable))
.map(Into::into)
}
@@ -112,7 +112,7 @@ impl ConfigService for Backend {
})
}

fn set_config_string(&self, input: pb::SetConfigStringIn) -> Result<pb::OpChanges> {
fn set_config_string(&self, input: pb::SetConfigStringRequest) -> Result<pb::OpChanges> {
self.with_col(|col| col.set_config_string(input.key().into(), &input.value, input.undoable))
.map(Into::into)
}

@@ -5,7 +5,7 @@ use super::Backend;
pub(super) use crate::backend_proto::deckconfig_service::Service as DeckConfigService;
use crate::{
backend_proto as pb,
deckconfig::{DeckConfSchema11, DeckConfig, UpdateDeckConfigsIn},
deckconfig::{DeckConfSchema11, DeckConfig, UpdateDeckConfigsRequest},
prelude::*,
};
@@ -63,7 +63,7 @@ impl DeckConfigService for Backend {
self.with_col(|col| col.get_deck_configs_for_update(input.into()))
}

fn update_deck_configs(&self, input: pb::UpdateDeckConfigsIn) -> Result<pb::OpChanges> {
fn update_deck_configs(&self, input: pb::UpdateDeckConfigsRequest) -> Result<pb::OpChanges> {
self.with_col(|col| col.update_deck_configs(input.into()))
.map(Into::into)
}
@@ -81,9 +81,9 @@ impl From<DeckConfig> for pb::DeckConfig {
}
}

impl From<pb::UpdateDeckConfigsIn> for UpdateDeckConfigsIn {
fn from(c: pb::UpdateDeckConfigsIn) -> Self {
UpdateDeckConfigsIn {
impl From<pb::UpdateDeckConfigsRequest> for UpdateDeckConfigsRequest {
fn from(c: pb::UpdateDeckConfigsRequest) -> Self {
UpdateDeckConfigsRequest {
target_deck_id: c.target_deck_id.into(),
configs: c.configs.into_iter().map(Into::into).collect(),
removed_config_ids: c.removed_config_ids.into_iter().map(Into::into).collect(),

@@ -22,7 +22,10 @@ impl DecksService for Backend {
})
}

fn add_or_update_deck_legacy(&self, input: pb::AddOrUpdateDeckLegacyIn) -> Result<pb::DeckId> {
fn add_or_update_deck_legacy(
&self,
input: pb::AddOrUpdateDeckLegacyRequest,
) -> Result<pb::DeckId> {
self.with_col(|col| {
let schema11: DeckSchema11 = serde_json::from_slice(&input.deck)?;
let mut deck: Deck = schema11.into();
@@ -38,7 +41,7 @@ impl DecksService for Backend {
})
}

fn deck_tree(&self, input: pb::DeckTreeIn) -> Result<pb::DeckTreeNode> {
fn deck_tree(&self, input: pb::DeckTreeRequest) -> Result<pb::DeckTreeNode> {
let lim = if input.top_deck_id > 0 {
Some(DeckId(input.top_deck_id))
} else {
@@ -118,7 +121,7 @@ impl DecksService for Backend {
})
}

fn get_deck_names(&self, input: pb::GetDeckNamesIn) -> Result<pb::DeckNames> {
fn get_deck_names(&self, input: pb::GetDeckNamesRequest) -> Result<pb::DeckNames> {
self.with_col(|col| {
let names = if input.include_filtered {
col.get_all_deck_names(input.skip_empty_default)?
@@ -151,7 +154,7 @@ impl DecksService for Backend {
.map(Into::into)
}

fn reparent_decks(&self, input: pb::ReparentDecksIn) -> Result<pb::OpChangesWithCount> {
fn reparent_decks(&self, input: pb::ReparentDecksRequest) -> Result<pb::OpChangesWithCount> {
let deck_ids: Vec<_> = input.deck_ids.into_iter().map(Into::into).collect();
let new_parent = if input.new_parent == 0 {
None
@@ -162,7 +165,7 @@ impl DecksService for Backend {
.map(Into::into)
}

fn rename_deck(&self, input: pb::RenameDeckIn) -> Result<pb::OpChanges> {
fn rename_deck(&self, input: pb::RenameDeckRequest) -> Result<pb::OpChanges> {
self.with_col(|col| col.rename_deck(input.deck_id.into(), &input.new_name))
.map(Into::into)
}
@@ -185,7 +188,7 @@ impl DecksService for Backend {
Ok(FilteredSearchOrder::labels(&self.tr).into())
}

fn set_deck_collapsed(&self, input: pb::SetDeckCollapsedIn) -> Result<pb::OpChanges> {
fn set_deck_collapsed(&self, input: pb::SetDeckCollapsedRequest) -> Result<pb::OpChanges> {
self.with_col(|col| {
col.set_deck_collapsed(input.deck_id.into(), input.collapsed, input.scope())
})

@@ -14,7 +14,7 @@ use crate::{
};

impl I18nService for Backend {
fn translate_string(&self, input: pb::TranslateStringIn) -> Result<pb::String> {
fn translate_string(&self, input: pb::TranslateStringRequest) -> Result<pb::String> {
let args = build_fluent_args(input.args);

Ok(self
@@ -27,8 +27,8 @@ impl I18nService for Backend {
.into())
}

fn format_timespan(&self, input: pb::FormatTimespanIn) -> Result<pb::String> {
use pb::format_timespan_in::Context;
fn format_timespan(&self, input: pb::FormatTimespanRequest) -> Result<pb::String> {
use pb::format_timespan_request::Context;
Ok(match input.context() {
Context::Precise => time_span(input.seconds, &self.tr, true),
Context::Intervals => time_span(input.seconds, &self.tr, false),
@@ -37,7 +37,7 @@ impl I18nService for Backend {
.into())
}

fn i18n_resources(&self, input: pb::I18nResourcesIn) -> Result<pb::Json> {
fn i18n_resources(&self, input: pb::I18nResourcesRequest) -> Result<pb::Json> {
serde_json::to_vec(&self.tr.resources_for_js(&input.modules))
.map(Into::into)
.map_err(Into::into)

@@ -13,7 +13,7 @@ impl MediaService for Backend {
// media
//-----------------------------------------------

fn check_media(&self, _input: pb::Empty) -> Result<pb::CheckMediaOut> {
fn check_media(&self, _input: pb::Empty) -> Result<pb::CheckMediaResponse> {
let mut handler = self.new_progress_handler();
let progress_fn =
move |progress| handler.update(Progress::MediaCheck(progress as u32), true);
@@ -25,7 +25,7 @@ impl MediaService for Backend {

let report = checker.summarize_output(&mut output);

Ok(pb::CheckMediaOut {
Ok(pb::CheckMediaResponse {
unused: output.unused,
missing: output.missing,
report,
@@ -35,7 +35,7 @@ impl MediaService for Backend {
})
}

fn trash_media_files(&self, input: pb::TrashMediaFilesIn) -> Result<pb::Empty> {
fn trash_media_files(&self, input: pb::TrashMediaFilesRequest) -> Result<pb::Empty> {
self.with_col(|col| {
let mgr = MediaManager::new(&col.media_folder, &col.media_db)?;
let mut ctx = mgr.dbctx();
@@ -44,7 +44,7 @@ impl MediaService for Backend {
.map(Into::into)
}

fn add_media_file(&self, input: pb::AddMediaFileIn) -> Result<pb::String> {
fn add_media_file(&self, input: pb::AddMediaFileRequest) -> Result<pb::String> {
self.with_col(|col| {
let mgr = MediaManager::new(&col.media_folder, &col.media_db)?;
let mut ctx = mgr.dbctx();

@@ -19,18 +19,21 @@ impl NotesService for Backend {
})
}

fn add_note(&self, input: pb::AddNoteIn) -> Result<pb::AddNoteOut> {
fn add_note(&self, input: pb::AddNoteRequest) -> Result<pb::AddNoteResponse> {
self.with_col(|col| {
let mut note: Note = input.note.ok_or(AnkiError::NotFound)?.into();
let changes = col.add_note(&mut note, DeckId(input.deck_id))?;
Ok(pb::AddNoteOut {
Ok(pb::AddNoteResponse {
note_id: note.id.0,
changes: Some(changes.into()),
})
})
}

fn defaults_for_adding(&self, input: pb::DefaultsForAddingIn) -> Result<pb::DeckAndNotetype> {
fn defaults_for_adding(
&self,
input: pb::DefaultsForAddingRequest,
) -> Result<pb::DeckAndNotetype> {
self.with_col(|col| {
let home_deck: DeckId = input.home_deck_of_current_review_card.into();
col.defaults_for_adding(home_deck).map(Into::into)
@@ -46,7 +49,7 @@ impl NotesService for Backend {
})
}

fn update_note(&self, input: pb::UpdateNoteIn) -> Result<pb::OpChanges> {
fn update_note(&self, input: pb::UpdateNoteRequest) -> Result<pb::OpChanges> {
self.with_col(|col| {
let mut note: Note = input.note.ok_or(AnkiError::NotFound)?.into();
col.update_note_maybe_undoable(&mut note, !input.skip_undo_entry)
@@ -63,7 +66,7 @@ impl NotesService for Backend {
})
}

fn remove_notes(&self, input: pb::RemoveNotesIn) -> Result<pb::OpChangesWithCount> {
fn remove_notes(&self, input: pb::RemoveNotesRequest) -> Result<pb::OpChangesWithCount> {
self.with_col(|col| {
if !input.note_ids.is_empty() {
col.remove_notes(
@@ -87,17 +90,20 @@ impl NotesService for Backend {
})
}

fn cloze_numbers_in_note(&self, note: pb::Note) -> Result<pb::ClozeNumbersInNoteOut> {
fn cloze_numbers_in_note(&self, note: pb::Note) -> Result<pb::ClozeNumbersInNoteResponse> {
let mut set = HashSet::with_capacity(4);
for field in &note.fields {
add_cloze_numbers_in_string(field, &mut set);
}
Ok(pb::ClozeNumbersInNoteOut {
Ok(pb::ClozeNumbersInNoteResponse {
numbers: set.into_iter().map(|n| n as u32).collect(),
})
}

fn after_note_updates(&self, input: pb::AfterNoteUpdatesIn) -> Result<pb::OpChangesWithCount> {
fn after_note_updates(
&self,
input: pb::AfterNoteUpdatesRequest,
) -> Result<pb::OpChangesWithCount> {
self.with_col(|col| {
col.after_note_updates(
&to_note_ids(input.nids),
@@ -110,21 +116,21 @@ impl NotesService for Backend {

fn field_names_for_notes(
&self,
input: pb::FieldNamesForNotesIn,
) -> Result<pb::FieldNamesForNotesOut> {
input: pb::FieldNamesForNotesRequest,
) -> Result<pb::FieldNamesForNotesResponse> {
self.with_col(|col| {
let nids: Vec<_> = input.nids.into_iter().map(NoteId).collect();
col.storage
.field_names_for_notes(&nids)
.map(|fields| pb::FieldNamesForNotesOut { fields })
.map(|fields| pb::FieldNamesForNotesResponse { fields })
})
}

fn note_fields_check(&self, input: pb::Note) -> Result<pb::NoteFieldsCheckOut> {
fn note_fields_check(&self, input: pb::Note) -> Result<pb::NoteFieldsCheckResponse> {
let note: Note = input.into();
self.with_col(|col| {
col.note_fields_check(&note)
.map(|r| pb::NoteFieldsCheckOut { state: r as i32 })
.map(|r| pb::NoteFieldsCheckResponse { state: r as i32 })
})
}

@@ -47,7 +47,10 @@ impl NotetypesService for Backend {
.map(Into::into)
}

fn add_or_update_notetype(&self, input: pb::AddOrUpdateNotetypeIn) -> Result<pb::NotetypeId> {
fn add_or_update_notetype(
&self,
||||
input: pb::AddOrUpdateNotetypeRequest,
|
||||
) -> Result<pb::NotetypeId> {
|
||||
self.with_col(|col| {
|
||||
let legacy: NotetypeSchema11 = serde_json::from_slice(&input.json)?;
|
||||
let mut nt: Notetype = legacy.into();
|
||||
@ -138,13 +141,13 @@ impl NotetypesService for Backend {
|
||||
.map(Into::into)
|
||||
}
|
||||
|
||||
fn get_aux_notetype_config_key(&self, input: pb::GetAuxConfigKeyIn) -> Result<pb::String> {
|
||||
fn get_aux_notetype_config_key(&self, input: pb::GetAuxConfigKeyRequest) -> Result<pb::String> {
|
||||
Ok(get_aux_notetype_config_key(input.id.into(), &input.key).into())
|
||||
}
|
||||
|
||||
fn get_aux_template_config_key(
|
||||
&self,
|
||||
input: pb::GetAuxTemplateConfigKeyIn,
|
||||
input: pb::GetAuxTemplateConfigKeyRequest,
|
||||
) -> Result<pb::String> {
|
||||
self.with_col(|col| {
|
||||
col.get_aux_template_config_key(
|
||||
@ -165,14 +168,14 @@ impl NotetypesService for Backend {
|
||||
|
||||
fn get_change_notetype_info(
|
||||
&self,
|
||||
input: pb::GetChangeNotetypeInfoIn,
|
||||
input: pb::GetChangeNotetypeInfoRequest,
|
||||
) -> Result<pb::ChangeNotetypeInfo> {
|
||||
self.with_col(|col| {
|
||||
col.notetype_change_info(input.old_notetype_id.into(), input.new_notetype_id.into())
|
||||
.map(Into::into)
|
||||
})
|
||||
}
|
||||
fn change_notetype(&self, input: pb::ChangeNotetypeIn) -> Result<pb::OpChanges> {
|
||||
fn change_notetype(&self, input: pb::ChangeNotetypeRequest) -> Result<pb::OpChanges> {
|
||||
self.with_col(|col| col.change_notetype_of_notes(input.into()).map(Into::into))
|
||||
}
|
||||
}
|
||||
@ -203,8 +206,8 @@ impl From<NotetypeChangeInfo> for pb::ChangeNotetypeInfo {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<pb::ChangeNotetypeIn> for ChangeNotetypeInput {
|
||||
fn from(i: pb::ChangeNotetypeIn) -> Self {
|
||||
impl From<pb::ChangeNotetypeRequest> for ChangeNotetypeInput {
|
||||
fn from(i: pb::ChangeNotetypeRequest) -> Self {
|
||||
ChangeNotetypeInput {
|
||||
current_schema: i.current_schema.into(),
|
||||
note_ids: i.note_ids.into_newtype(NoteId),
|
||||
@ -231,9 +234,9 @@ impl From<pb::ChangeNotetypeIn> for ChangeNotetypeInput {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ChangeNotetypeInput> for pb::ChangeNotetypeIn {
|
||||
impl From<ChangeNotetypeInput> for pb::ChangeNotetypeRequest {
|
||||
fn from(i: ChangeNotetypeInput) -> Self {
|
||||
pb::ChangeNotetypeIn {
|
||||
pb::ChangeNotetypeRequest {
|
||||
current_schema: i.current_schema.into(),
|
||||
note_ids: i.note_ids.into_iter().map(Into::into).collect(),
|
||||
old_notetype_id: i.old_notetype_id.into(),
|
||||
|
@ -19,7 +19,7 @@ use crate::{
|
||||
impl SchedulingService for Backend {
|
||||
/// This behaves like _updateCutoff() in older code - it also unburies at the start of
|
||||
/// a new day.
|
||||
fn sched_timing_today(&self, _input: pb::Empty) -> Result<pb::SchedTimingTodayOut> {
|
||||
fn sched_timing_today(&self, _input: pb::Empty) -> Result<pb::SchedTimingTodayResponse> {
|
||||
self.with_col(|col| {
|
||||
let timing = col.timing_today()?;
|
||||
col.unbury_if_day_rolled_over(timing)?;
|
||||
@ -33,11 +33,11 @@ impl SchedulingService for Backend {
|
||||
}
|
||||
|
||||
/// Message rendering only, for old graphs.
|
||||
fn studied_today_message(&self, input: pb::StudiedTodayMessageIn) -> Result<pb::String> {
|
||||
fn studied_today_message(&self, input: pb::StudiedTodayMessageRequest) -> Result<pb::String> {
|
||||
Ok(studied_today(input.cards, input.seconds as f32, &self.tr).into())
|
||||
}
|
||||
|
||||
fn update_stats(&self, input: pb::UpdateStatsIn) -> Result<pb::Empty> {
|
||||
fn update_stats(&self, input: pb::UpdateStatsRequest) -> Result<pb::Empty> {
|
||||
self.with_col(|col| {
|
||||
col.transact_no_undo(|col| {
|
||||
let today = col.current_due_day(0)?;
|
||||
@ -47,7 +47,7 @@ impl SchedulingService for Backend {
|
||||
})
|
||||
}
|
||||
|
||||
fn extend_limits(&self, input: pb::ExtendLimitsIn) -> Result<pb::Empty> {
|
||||
fn extend_limits(&self, input: pb::ExtendLimitsRequest) -> Result<pb::Empty> {
|
||||
self.with_col(|col| {
|
||||
col.transact_no_undo(|col| {
|
||||
let today = col.current_due_day(0)?;
|
||||
@ -64,11 +64,11 @@ impl SchedulingService for Backend {
|
||||
})
|
||||
}
|
||||
|
||||
fn counts_for_deck_today(&self, input: pb::DeckId) -> Result<pb::CountsForDeckTodayOut> {
|
||||
fn counts_for_deck_today(&self, input: pb::DeckId) -> Result<pb::CountsForDeckTodayResponse> {
|
||||
self.with_col(|col| col.counts_for_deck_today(input.did.into()))
|
||||
}
|
||||
|
||||
fn congrats_info(&self, _input: pb::Empty) -> Result<pb::CongratsInfoOut> {
|
||||
fn congrats_info(&self, _input: pb::Empty) -> Result<pb::CongratsInfoResponse> {
|
||||
self.with_col(|col| col.congrats_info())
|
||||
}
|
||||
|
||||
@ -77,7 +77,7 @@ impl SchedulingService for Backend {
|
||||
self.with_col(|col| col.unbury_or_unsuspend_cards(&cids).map(Into::into))
|
||||
}
|
||||
|
||||
fn unbury_deck(&self, input: pb::UnburyDeckIn) -> Result<pb::OpChanges> {
|
||||
fn unbury_deck(&self, input: pb::UnburyDeckRequest) -> Result<pb::OpChanges> {
|
||||
self.with_col(|col| {
|
||||
col.unbury_deck(input.deck_id.into(), input.mode())
|
||||
.map(Into::into)
|
||||
@ -86,7 +86,7 @@ impl SchedulingService for Backend {
|
||||
|
||||
fn bury_or_suspend_cards(
|
||||
&self,
|
||||
input: pb::BuryOrSuspendCardsIn,
|
||||
input: pb::BuryOrSuspendCardsRequest,
|
||||
) -> Result<pb::OpChangesWithCount> {
|
||||
self.with_col(|col| {
|
||||
let mode = input.mode();
|
||||
@ -108,7 +108,7 @@ impl SchedulingService for Backend {
|
||||
self.with_col(|col| col.rebuild_filtered_deck(input.did.into()).map(Into::into))
|
||||
}
|
||||
|
||||
fn schedule_cards_as_new(&self, input: pb::ScheduleCardsAsNewIn) -> Result<pb::OpChanges> {
|
||||
fn schedule_cards_as_new(&self, input: pb::ScheduleCardsAsNewRequest) -> Result<pb::OpChanges> {
|
||||
self.with_col(|col| {
|
||||
let cids = input.card_ids.into_newtype(CardId);
|
||||
let log = input.log;
|
||||
@ -116,14 +116,14 @@ impl SchedulingService for Backend {
|
||||
})
|
||||
}
|
||||
|
||||
fn set_due_date(&self, input: pb::SetDueDateIn) -> Result<pb::OpChanges> {
|
||||
fn set_due_date(&self, input: pb::SetDueDateRequest) -> Result<pb::OpChanges> {
|
||||
let config = input.config_key.map(Into::into);
|
||||
let days = input.days;
|
||||
let cids = input.card_ids.into_newtype(CardId);
|
||||
self.with_col(|col| col.set_due_date(&cids, &days, config).map(Into::into))
|
||||
}
|
||||
|
||||
fn sort_cards(&self, input: pb::SortCardsIn) -> Result<pb::OpChangesWithCount> {
|
||||
fn sort_cards(&self, input: pb::SortCardsRequest) -> Result<pb::OpChangesWithCount> {
|
||||
let cids = input.card_ids.into_newtype(CardId);
|
||||
let (start, step, random, shift) = (
|
||||
input.starting_from,
|
||||
@ -142,7 +142,7 @@ impl SchedulingService for Backend {
|
||||
})
|
||||
}
|
||||
|
||||
fn sort_deck(&self, input: pb::SortDeckIn) -> Result<pb::OpChangesWithCount> {
|
||||
fn sort_deck(&self, input: pb::SortDeckRequest) -> Result<pb::OpChangesWithCount> {
|
||||
self.with_col(|col| {
|
||||
col.sort_deck_legacy(input.deck_id.into(), input.randomize)
|
||||
.map(Into::into)
|
||||
@ -176,7 +176,7 @@ impl SchedulingService for Backend {
|
||||
.map(Into::into)
|
||||
}
|
||||
|
||||
fn get_queued_cards(&self, input: pb::GetQueuedCardsIn) -> Result<pb::QueuedCards> {
|
||||
fn get_queued_cards(&self, input: pb::GetQueuedCardsRequest) -> Result<pb::QueuedCards> {
|
||||
self.with_col(|col| {
|
||||
col.get_queued_cards(input.fetch_limit as usize, input.intraday_learning_only)
|
||||
.map(Into::into)
|
||||
@ -184,9 +184,9 @@ impl SchedulingService for Backend {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<crate::scheduler::timing::SchedTimingToday> for pb::SchedTimingTodayOut {
|
||||
fn from(t: crate::scheduler::timing::SchedTimingToday) -> pb::SchedTimingTodayOut {
|
||||
pb::SchedTimingTodayOut {
|
||||
impl From<crate::scheduler::timing::SchedTimingToday> for pb::SchedTimingTodayResponse {
|
||||
fn from(t: crate::scheduler::timing::SchedTimingToday) -> pb::SchedTimingTodayResponse {
|
||||
pb::SchedTimingTodayResponse {
|
||||
days_elapsed: t.days_elapsed,
|
||||
next_day_at: t.next_day_at.0,
|
||||
}
|
||||
|
@ -22,27 +22,27 @@ impl SearchService for Backend {
|
||||
Ok(write_nodes(&node.into_node_list()).into())
|
||||
}
|
||||
|
||||
fn search_cards(&self, input: pb::SearchIn) -> Result<pb::SearchOut> {
|
||||
fn search_cards(&self, input: pb::SearchRequest) -> Result<pb::SearchResponse> {
|
||||
self.with_col(|col| {
|
||||
let order = input.order.unwrap_or_default().value.into();
|
||||
let cids = col.search_cards(&input.search, order)?;
|
||||
Ok(pb::SearchOut {
|
||||
Ok(pb::SearchResponse {
|
||||
ids: cids.into_iter().map(|v| v.0).collect(),
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
fn search_notes(&self, input: pb::SearchIn) -> Result<pb::SearchOut> {
|
||||
fn search_notes(&self, input: pb::SearchRequest) -> Result<pb::SearchResponse> {
|
||||
self.with_col(|col| {
|
||||
let order = input.order.unwrap_or_default().value.into();
|
||||
let nids = col.search_notes(&input.search, order)?;
|
||||
Ok(pb::SearchOut {
|
||||
Ok(pb::SearchResponse {
|
||||
ids: nids.into_iter().map(|v| v.0).collect(),
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
fn join_search_nodes(&self, input: pb::JoinSearchNodesIn) -> Result<pb::String> {
|
||||
fn join_search_nodes(&self, input: pb::JoinSearchNodesRequest) -> Result<pb::String> {
|
||||
let sep = input.joiner().into();
|
||||
let existing_nodes = {
|
||||
let node: Node = input.existing_node.unwrap_or_default().try_into()?;
|
||||
@ -52,7 +52,7 @@ impl SearchService for Backend {
|
||||
Ok(concatenate_searches(sep, existing_nodes, additional_node).into())
|
||||
}
|
||||
|
||||
fn replace_search_node(&self, input: pb::ReplaceSearchNodeIn) -> Result<pb::String> {
|
||||
fn replace_search_node(&self, input: pb::ReplaceSearchNodeRequest) -> Result<pb::String> {
|
||||
let existing = {
|
||||
let node = input.existing_node.unwrap_or_default().try_into()?;
|
||||
if let Node::Group(nodes) = node {
|
||||
@ -65,7 +65,7 @@ impl SearchService for Backend {
|
||||
Ok(replace_search_node(existing, replacement).into())
|
||||
}
|
||||
|
||||
fn find_and_replace(&self, input: pb::FindAndReplaceIn) -> Result<pb::OpChangesWithCount> {
|
||||
fn find_and_replace(&self, input: pb::FindAndReplaceRequest) -> Result<pb::OpChangesWithCount> {
|
||||
let mut search = if input.regex {
|
||||
input.search
|
||||
} else {
|
||||
|
@ -11,7 +11,7 @@ impl StatsService for Backend {
|
||||
.map(Into::into)
|
||||
}
|
||||
|
||||
fn graphs(&self, input: pb::GraphsIn) -> Result<pb::GraphsOut> {
|
||||
fn graphs(&self, input: pb::GraphsRequest) -> Result<pb::GraphsResponse> {
|
||||
self.with_col(|col| col.graph_data_for_search(&input.search, input.days))
|
||||
}
|
||||
|
||||
|
@ -31,39 +31,39 @@ pub(super) struct SyncState {
|
||||
#[derive(Default, Debug)]
|
||||
pub(super) struct RemoteSyncStatus {
|
||||
pub last_check: TimestampSecs,
|
||||
pub last_response: pb::sync_status_out::Required,
|
||||
pub last_response: pb::sync_status_response::Required,
|
||||
}
|
||||
|
||||
impl RemoteSyncStatus {
|
||||
pub(super) fn update(&mut self, required: pb::sync_status_out::Required) {
|
||||
pub(super) fn update(&mut self, required: pb::sync_status_response::Required) {
|
||||
self.last_check = TimestampSecs::now();
|
||||
self.last_response = required
|
||||
}
|
||||
}
|
||||
|
||||
impl From<SyncOutput> for pb::SyncCollectionOut {
|
||||
impl From<SyncOutput> for pb::SyncCollectionResponse {
|
||||
fn from(o: SyncOutput) -> Self {
|
||||
pb::SyncCollectionOut {
|
||||
pb::SyncCollectionResponse {
|
||||
host_number: o.host_number,
|
||||
server_message: o.server_message,
|
||||
required: match o.required {
|
||||
SyncActionRequired::NoChanges => {
|
||||
pb::sync_collection_out::ChangesRequired::NoChanges as i32
|
||||
pb::sync_collection_response::ChangesRequired::NoChanges as i32
|
||||
}
|
||||
SyncActionRequired::FullSyncRequired {
|
||||
upload_ok,
|
||||
download_ok,
|
||||
} => {
|
||||
if !upload_ok {
|
||||
pb::sync_collection_out::ChangesRequired::FullDownload as i32
|
||||
pb::sync_collection_response::ChangesRequired::FullDownload as i32
|
||||
} else if !download_ok {
|
||||
pb::sync_collection_out::ChangesRequired::FullUpload as i32
|
||||
pb::sync_collection_response::ChangesRequired::FullUpload as i32
|
||||
} else {
|
||||
pb::sync_collection_out::ChangesRequired::FullSync as i32
|
||||
pb::sync_collection_response::ChangesRequired::FullSync as i32
|
||||
}
|
||||
}
|
||||
SyncActionRequired::NormalSyncRequired => {
|
||||
pb::sync_collection_out::ChangesRequired::NormalSync as i32
|
||||
pb::sync_collection_response::ChangesRequired::NormalSync as i32
|
||||
}
|
||||
},
|
||||
}
|
||||
@ -104,15 +104,15 @@ impl SyncService for Backend {
|
||||
self.with_col(|col| col.before_upload().map(Into::into))
|
||||
}
|
||||
|
||||
fn sync_login(&self, input: pb::SyncLoginIn) -> Result<pb::SyncAuth> {
|
||||
fn sync_login(&self, input: pb::SyncLoginRequest) -> Result<pb::SyncAuth> {
|
||||
self.sync_login_inner(input)
|
||||
}
|
||||
|
||||
fn sync_status(&self, input: pb::SyncAuth) -> Result<pb::SyncStatusOut> {
|
||||
fn sync_status(&self, input: pb::SyncAuth) -> Result<pb::SyncStatusResponse> {
|
||||
self.sync_status_inner(input)
|
||||
}
|
||||
|
||||
fn sync_collection(&self, input: pb::SyncAuth) -> Result<pb::SyncCollectionOut> {
|
||||
fn sync_collection(&self, input: pb::SyncAuth) -> Result<pb::SyncCollectionResponse> {
|
||||
self.sync_collection_inner(input)
|
||||
}
|
||||
|
||||
@ -126,7 +126,7 @@ impl SyncService for Backend {
|
||||
Ok(().into())
|
||||
}
|
||||
|
||||
fn sync_server_method(&self, input: pb::SyncServerMethodIn) -> Result<pb::Json> {
|
||||
fn sync_server_method(&self, input: pb::SyncServerMethodRequest) -> Result<pb::Json> {
|
||||
let req = SyncRequest::from_method_and_data(input.method(), input.data)?;
|
||||
self.sync_server_method_inner(req).map(Into::into)
|
||||
}
|
||||
@ -221,7 +221,7 @@ impl Backend {
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn sync_login_inner(&self, input: pb::SyncLoginIn) -> Result<pb::SyncAuth> {
|
||||
pub(super) fn sync_login_inner(&self, input: pb::SyncLoginRequest) -> Result<pb::SyncAuth> {
|
||||
let (_guard, abort_reg) = self.sync_abort_handle()?;
|
||||
|
||||
let rt = self.runtime_handle();
|
||||
@ -237,10 +237,10 @@ impl Backend {
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) fn sync_status_inner(&self, input: pb::SyncAuth) -> Result<pb::SyncStatusOut> {
|
||||
pub(super) fn sync_status_inner(&self, input: pb::SyncAuth) -> Result<pb::SyncStatusResponse> {
|
||||
// any local changes mean we can skip the network round-trip
|
||||
let req = self.with_col(|col| col.get_local_sync_status())?;
|
||||
if req != pb::sync_status_out::Required::NoChanges {
|
||||
if req != pb::sync_status_response::Required::NoChanges {
|
||||
return Ok(req.into());
|
||||
}
|
||||
|
||||
@ -275,7 +275,7 @@ impl Backend {
|
||||
pub(super) fn sync_collection_inner(
|
||||
&self,
|
||||
input: pb::SyncAuth,
|
||||
) -> Result<pb::SyncCollectionOut> {
|
||||
) -> Result<pb::SyncCollectionResponse> {
|
||||
let (_guard, abort_reg) = self.sync_abort_handle()?;
|
||||
|
||||
let rt = self.runtime_handle();
|
||||
@ -367,7 +367,7 @@ impl Backend {
|
||||
.unwrap()
|
||||
.sync
|
||||
.remote_sync_status
|
||||
.update(pb::sync_status_out::Required::NoChanges);
|
||||
.update(pb::sync_status_response::Required::NoChanges);
|
||||
}
|
||||
sync_result
|
||||
}
|
||||
|
@ -11,10 +11,10 @@ use crate::{
|
||||
prelude::*,
|
||||
sync::{
|
||||
http::{
|
||||
ApplyChangesIn, ApplyChunkIn, ApplyGravesIn, HostKeyIn, HostKeyOut, MetaIn,
|
||||
SanityCheckIn, StartIn, SyncRequest,
|
||||
ApplyChangesRequest, ApplyChunkRequest, ApplyGravesRequest, HostKeyRequest,
|
||||
HostKeyResponse, MetaRequest, SanityCheckRequest, StartRequest, SyncRequest,
|
||||
},
|
||||
Chunk, Graves, LocalServer, SanityCheckOut, SanityCheckStatus, SyncMeta, SyncServer,
|
||||
Chunk, Graves, LocalServer, SanityCheckResponse, SanityCheckStatus, SyncMeta, SyncServer,
|
||||
UnchunkedChanges, SYNC_VERSION_MAX, SYNC_VERSION_MIN,
|
||||
},
|
||||
};
|
||||
@ -39,13 +39,13 @@ impl Backend {
|
||||
}
|
||||
|
||||
/// Gives out a dummy hkey - auth should be implemented at a higher layer.
|
||||
fn host_key(&self, _input: HostKeyIn) -> Result<HostKeyOut> {
|
||||
Ok(HostKeyOut {
|
||||
fn host_key(&self, _input: HostKeyRequest) -> Result<HostKeyResponse> {
|
||||
Ok(HostKeyResponse {
|
||||
key: "unimplemented".into(),
|
||||
})
|
||||
}
|
||||
|
||||
fn meta(&self, input: MetaIn) -> Result<SyncMeta> {
|
||||
fn meta(&self, input: MetaRequest) -> Result<SyncMeta> {
|
||||
if input.sync_version < SYNC_VERSION_MIN || input.sync_version > SYNC_VERSION_MAX {
|
||||
return Ok(SyncMeta {
|
||||
server_message: "Your Anki version is either too old, or too new.".into(),
|
||||
@ -86,7 +86,7 @@ impl Backend {
|
||||
.ok_or_else(|| AnkiError::sync_error("", SyncErrorKind::SyncNotStarted))
|
||||
}
|
||||
|
||||
fn start(&self, input: StartIn) -> Result<Graves> {
|
||||
fn start(&self, input: StartRequest) -> Result<Graves> {
|
||||
// place col into new server
|
||||
let server = self.col_into_server()?;
|
||||
let mut state_guard = self.state.lock().unwrap();
|
||||
@ -103,14 +103,14 @@ impl Backend {
|
||||
})
|
||||
}
|
||||
|
||||
fn apply_graves(&self, input: ApplyGravesIn) -> Result<()> {
|
||||
fn apply_graves(&self, input: ApplyGravesRequest) -> Result<()> {
|
||||
self.with_sync_server(|server| {
|
||||
let rt = Runtime::new().unwrap();
|
||||
rt.block_on(server.apply_graves(input.chunk))
|
||||
})
|
||||
}
|
||||
|
||||
fn apply_changes(&self, input: ApplyChangesIn) -> Result<UnchunkedChanges> {
|
||||
fn apply_changes(&self, input: ApplyChangesRequest) -> Result<UnchunkedChanges> {
|
||||
self.with_sync_server(|server| {
|
||||
let rt = Runtime::new().unwrap();
|
||||
rt.block_on(server.apply_changes(input.changes))
|
||||
@ -124,14 +124,14 @@ impl Backend {
|
||||
})
|
||||
}
|
||||
|
||||
fn apply_chunk(&self, input: ApplyChunkIn) -> Result<()> {
|
||||
fn apply_chunk(&self, input: ApplyChunkRequest) -> Result<()> {
|
||||
self.with_sync_server(|server| {
|
||||
let rt = Runtime::new().unwrap();
|
||||
rt.block_on(server.apply_chunk(input.chunk))
|
||||
})
|
||||
}
|
||||
|
||||
fn sanity_check(&self, input: SanityCheckIn) -> Result<SanityCheckOut> {
|
||||
fn sanity_check(&self, input: SanityCheckRequest) -> Result<SanityCheckResponse> {
|
||||
self.with_sync_server(|server| {
|
||||
let rt = Runtime::new().unwrap();
|
||||
rt.block_on(server.sanity_check(input.client))
|
||||
|
@ -27,7 +27,7 @@ impl TagsService for Backend {
|
||||
self.with_col(|col| col.remove_tags(tags.val.as_str()).map(Into::into))
|
||||
}
|
||||
|
||||
fn set_tag_collapsed(&self, input: pb::SetTagCollapsedIn) -> Result<pb::OpChanges> {
|
||||
fn set_tag_collapsed(&self, input: pb::SetTagCollapsedRequest) -> Result<pb::OpChanges> {
|
||||
self.with_col(|col| {
|
||||
col.set_tag_collapsed(&input.name, input.collapsed)
|
||||
.map(Into::into)
|
||||
@ -38,7 +38,7 @@ impl TagsService for Backend {
|
||||
self.with_col(|col| col.tag_tree())
|
||||
}
|
||||
|
||||
fn reparent_tags(&self, input: pb::ReparentTagsIn) -> Result<pb::OpChangesWithCount> {
|
||||
fn reparent_tags(&self, input: pb::ReparentTagsRequest) -> Result<pb::OpChangesWithCount> {
|
||||
let source_tags = input.tags;
|
||||
let target_tag = if input.new_parent.is_empty() {
|
||||
None
|
||||
@ -49,19 +49,19 @@ impl TagsService for Backend {
|
||||
.map(Into::into)
|
||||
}
|
||||
|
||||
fn rename_tags(&self, input: pb::RenameTagsIn) -> Result<pb::OpChangesWithCount> {
|
||||
fn rename_tags(&self, input: pb::RenameTagsRequest) -> Result<pb::OpChangesWithCount> {
|
||||
self.with_col(|col| col.rename_tag(&input.current_prefix, &input.new_prefix))
|
||||
.map(Into::into)
|
||||
}
|
||||
|
||||
fn add_note_tags(&self, input: pb::NoteIdsAndTagsIn) -> Result<pb::OpChangesWithCount> {
|
||||
fn add_note_tags(&self, input: pb::NoteIdsAndTagsRequest) -> Result<pb::OpChangesWithCount> {
|
||||
self.with_col(|col| {
|
||||
col.add_tags_to_notes(&to_note_ids(input.note_ids), &input.tags)
|
||||
.map(Into::into)
|
||||
})
|
||||
}
|
||||
|
||||
fn remove_note_tags(&self, input: pb::NoteIdsAndTagsIn) -> Result<pb::OpChangesWithCount> {
|
||||
fn remove_note_tags(&self, input: pb::NoteIdsAndTagsRequest) -> Result<pb::OpChangesWithCount> {
|
||||
self.with_col(|col| {
|
||||
col.remove_tags_from_notes(&to_note_ids(input.note_ids), &input.tags)
|
||||
.map(Into::into)
|
||||
@ -70,7 +70,7 @@ impl TagsService for Backend {
|
||||
|
||||
fn find_and_replace_tag(
|
||||
&self,
|
||||
input: pb::FindAndReplaceTagIn,
|
||||
input: pb::FindAndReplaceTagRequest,
|
||||
) -> Result<pb::OpChangesWithCount> {
|
||||
self.with_col(|col| {
|
||||
col.find_and_replace_tag(
|
||||
|
@ -6,7 +6,7 @@ pub(crate) mod undo;
|
||||
mod update;
|
||||
|
||||
pub use schema11::{DeckConfSchema11, NewCardOrderSchema11};
|
||||
pub use update::UpdateDeckConfigsIn;
|
||||
pub use update::UpdateDeckConfigsRequest;
|
||||
|
||||
pub use crate::backend_proto::deck_config::{
|
||||
config::{
|
||||
|
@ -15,7 +15,7 @@ use crate::{
|
||||
};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct UpdateDeckConfigsIn {
|
||||
pub struct UpdateDeckConfigsRequest {
|
||||
pub target_deck_id: DeckId,
|
||||
/// Deck will be set to last provided deck config.
|
||||
pub configs: Vec<DeckConfig>,
|
||||
@ -43,7 +43,7 @@ impl Collection {
|
||||
}
|
||||
|
||||
/// Information required for the deck options screen.
|
||||
pub fn update_deck_configs(&mut self, input: UpdateDeckConfigsIn) -> Result<OpOutput<()>> {
|
||||
pub fn update_deck_configs(&mut self, input: UpdateDeckConfigsRequest) -> Result<OpOutput<()>> {
|
||||
self.transact(Op::UpdateDeckConfig, |col| {
|
||||
col.update_deck_configs_inner(input)
|
||||
})
|
||||
@ -106,7 +106,7 @@ impl Collection {
|
||||
.collect())
|
||||
}
|
||||
|
||||
fn update_deck_configs_inner(&mut self, mut input: UpdateDeckConfigsIn) -> Result<()> {
|
||||
fn update_deck_configs_inner(&mut self, mut input: UpdateDeckConfigsRequest) -> Result<()> {
|
||||
if input.configs.is_empty() {
|
||||
return Err(AnkiError::invalid_input("config not provided"));
|
||||
}
|
||||
@ -219,7 +219,7 @@ mod test {
|
||||
|
||||
// if nothing changed, no changes should be made
|
||||
let output = col.get_deck_configs_for_update(DeckId(1))?;
|
||||
let mut input = UpdateDeckConfigsIn {
|
||||
let mut input = UpdateDeckConfigsRequest {
|
||||
target_deck_id: DeckId(1),
|
||||
configs: output
|
||||
.all_config
|
||||
|
@ -44,11 +44,11 @@ impl Collection {
|
||||
pub(crate) fn counts_for_deck_today(
|
||||
&mut self,
|
||||
did: DeckId,
|
||||
) -> Result<pb::CountsForDeckTodayOut> {
|
||||
) -> Result<pb::CountsForDeckTodayResponse> {
|
||||
let today = self.current_due_day(0)?;
|
||||
let mut deck = self.storage.get_deck(did)?.ok_or(AnkiError::NotFound)?;
|
||||
deck.reset_stats_if_day_changed(today);
|
||||
Ok(pb::CountsForDeckTodayOut {
|
||||
Ok(pb::CountsForDeckTodayResponse {
|
||||
new: deck.common.new_studied,
|
||||
review: deck.common.review_studied,
|
||||
})
|
||||
|
@ -23,7 +23,7 @@ impl Collection {
|
||||
&mut self,
|
||||
today: u32,
|
||||
usn: Usn,
|
||||
input: pb::UpdateStatsIn,
|
||||
input: pb::UpdateStatsRequest,
|
||||
) -> Result<()> {
|
||||
let did = input.deck_id.into();
|
||||
let mutator = |c: &mut DeckCommon| {
|
||||
|
@ -13,7 +13,7 @@ use super::{
|
||||
limits::{remaining_limits_map, RemainingLimits},
|
||||
DueCounts,
|
||||
};
|
||||
pub use crate::backend_proto::set_deck_collapsed_in::Scope as DeckCollapseScope;
|
||||
pub use crate::backend_proto::set_deck_collapsed_request::Scope as DeckCollapseScope;
|
||||
use crate::{
|
||||
backend_proto::DeckTreeNode, config::SchedulerVersion, ops::OpOutput, prelude::*, undo::Op,
|
||||
};
|
||||
|
@ -14,7 +14,7 @@ use num_integer::Integer;
|
||||
|
||||
use crate::{
|
||||
backend_proto as pb,
|
||||
backend_proto::note_fields_check_out::State as NoteFieldsState,
|
||||
backend_proto::note_fields_check_response::State as NoteFieldsState,
|
||||
cloze::contains_cloze,
|
||||
decks::DeckId,
|
||||
define_newtype,
|
||||
|
@ -320,7 +320,7 @@ impl Collection {
|
||||
self.update_deck_stats(
|
||||
updater.timing.days_elapsed,
|
||||
usn,
|
||||
backend_proto::UpdateStatsIn {
|
||||
backend_proto::UpdateStatsRequest {
|
||||
deck_id: updater.deck.id.0,
|
||||
new_delta,
|
||||
review_delta,
|
||||
|
@ -4,7 +4,8 @@
|
||||
use super::timing::SchedTimingToday;
|
||||
use crate::{
|
||||
backend_proto::{
|
||||
bury_or_suspend_cards_in::Mode as BuryOrSuspendMode, unbury_deck_in::Mode as UnburyDeckMode,
|
||||
bury_or_suspend_cards_request::Mode as BuryOrSuspendMode,
|
||||
unbury_deck_request::Mode as UnburyDeckMode,
|
||||
},
|
||||
card::CardQueue,
|
||||
config::SchedulerVersion,
|
||||
|
@ -14,7 +14,7 @@ pub(crate) struct CongratsInfo {
|
||||
}
|
||||
|
||||
impl Collection {
|
||||
pub fn congrats_info(&mut self) -> Result<pb::CongratsInfoOut> {
|
||||
pub fn congrats_info(&mut self) -> Result<pb::CongratsInfoResponse> {
|
||||
let did = self.get_current_deck_id();
|
||||
let deck = self.get_deck(did)?.ok_or(AnkiError::NotFound)?;
|
||||
let today = self.timing_today()?.days_elapsed;
|
||||
@ -25,7 +25,7 @@ impl Collection {
|
||||
- self.learn_ahead_secs() as i64
|
||||
- TimestampSecs::now().0)
|
||||
.max(0) as u32;
|
||||
Ok(pb::CongratsInfoOut {
|
||||
Ok(pb::CongratsInfoResponse {
|
||||
learn_remaining: info.learn_count,
|
||||
review_remaining: info.review_remaining,
|
||||
new_remaining: info.new_remaining,
|
||||
@ -49,7 +49,7 @@ mod test {
|
||||
let info = col.congrats_info().unwrap();
|
||||
assert_eq!(
|
||||
info,
|
||||
crate::backend_proto::CongratsInfoOut {
|
||||
crate::backend_proto::CongratsInfoResponse {
|
||||
learn_remaining: 0,
|
||||
review_remaining: false,
|
||||
new_remaining: false,
|
||||
|
@ -14,13 +14,13 @@ impl Collection {
|
||||
&mut self,
|
||||
search: &str,
|
||||
days: u32,
|
||||
) -> Result<pb::GraphsOut> {
|
||||
) -> Result<pb::GraphsResponse> {
|
||||
self.search_cards_into_table(search, SortMode::NoOrder)?;
|
||||
let all = search.trim().is_empty();
|
||||
self.graph_data(all, days)
|
||||
}
|
||||
|
||||
fn graph_data(&mut self, all: bool, days: u32) -> Result<pb::GraphsOut> {
|
||||
fn graph_data(&mut self, all: bool, days: u32) -> Result<pb::GraphsResponse> {
|
||||
let timing = self.timing_today()?;
|
||||
let revlog_start = if days > 0 {
|
||||
timing
|
||||
@ -43,7 +43,7 @@ impl Collection {
|
||||
|
||||
self.storage.clear_searched_cards_table()?;
|
||||
|
||||
Ok(pb::GraphsOut {
|
||||
Ok(pb::GraphsResponse {
|
||||
cards: cards.into_iter().map(Into::into).collect(),
|
||||
revlog,
|
||||
days_elapsed: timing.days_elapsed,
|
||||
|
@ -6,19 +6,19 @@ use std::{fs, path::PathBuf};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::{Chunk, Graves, SanityCheckCounts, UnchunkedChanges};
|
||||
use crate::{backend_proto::sync_server_method_in::Method, prelude::*};
|
||||
use crate::{backend_proto::sync_server_method_request::Method, prelude::*};
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum SyncRequest {
|
||||
HostKey(HostKeyIn),
|
||||
Meta(MetaIn),
|
||||
Start(StartIn),
|
||||
ApplyGraves(ApplyGravesIn),
|
||||
ApplyChanges(ApplyChangesIn),
|
||||
HostKey(HostKeyRequest),
|
||||
Meta(MetaRequest),
|
||||
Start(StartRequest),
|
||||
ApplyGraves(ApplyGravesRequest),
|
||||
ApplyChanges(ApplyChangesRequest),
|
||||
Chunk,
|
||||
ApplyChunk(ApplyChunkIn),
|
||||
ApplyChunk(ApplyChunkRequest),
|
||||
#[serde(rename = "sanityCheck2")]
|
||||
SanityCheck(SanityCheckIn),
|
||||
SanityCheck(SanityCheckRequest),
|
||||
Finish,
|
||||
Abort,
|
||||
#[serde(rename = "upload")]
|
||||
@ -73,19 +73,19 @@ impl SyncRequest {
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct HostKeyIn {
|
||||
pub struct HostKeyRequest {
|
||||
#[serde(rename = "u")]
|
||||
pub username: String,
|
||||
#[serde(rename = "p")]
|
||||
pub password: String,
|
||||
}
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct HostKeyOut {
|
||||
pub struct HostKeyResponse {
|
||||
pub key: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct MetaIn {
|
||||
pub struct MetaRequest {
|
||||
#[serde(rename = "v")]
|
||||
pub sync_version: u8,
|
||||
#[serde(rename = "cv")]
|
||||
@ -93,7 +93,7 @@ pub struct MetaIn {
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct StartIn {
|
||||
pub struct StartRequest {
|
||||
#[serde(rename = "minUsn")]
|
||||
pub client_usn: Usn,
|
||||
#[serde(rename = "lnewer")]
|
||||
@ -104,21 +104,21 @@ pub struct StartIn {
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct ApplyGravesIn {
|
||||
pub struct ApplyGravesRequest {
|
||||
pub chunk: Graves,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct ApplyChangesIn {
|
||||
pub struct ApplyChangesRequest {
|
||||
pub changes: UnchunkedChanges,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct ApplyChunkIn {
|
||||
pub struct ApplyChunkRequest {
|
||||
pub chunk: Chunk,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct SanityCheckIn {
|
||||
pub struct SanityCheckRequest {
|
||||
pub client: SanityCheckCounts,
|
||||
}
|
||||
|
@ -13,12 +13,12 @@ use tempfile::NamedTempFile;
|
||||
|
||||
use super::{
|
||||
http::{
|
||||
ApplyChangesIn, ApplyChunkIn, ApplyGravesIn, HostKeyIn, HostKeyOut, MetaIn, SanityCheckIn,
|
||||
StartIn, SyncRequest,
|
||||
ApplyChangesRequest, ApplyChunkRequest, ApplyGravesRequest, HostKeyRequest,
|
||||
HostKeyResponse, MetaRequest, SanityCheckRequest, StartRequest, SyncRequest,
|
||||
},
|
||||
server::SyncServer,
|
||||
Chunk, FullSyncProgress, Graves, SanityCheckCounts, SanityCheckOut, SyncMeta, UnchunkedChanges,
|
||||
SYNC_VERSION_MAX,
|
||||
Chunk, FullSyncProgress, Graves, SanityCheckCounts, SanityCheckResponse, SyncMeta,
|
||||
UnchunkedChanges, SYNC_VERSION_MAX,
|
||||
};
|
||||
use crate::{error::SyncErrorKind, notes::guid, prelude::*, version::sync_client_version};
|
||||
|
||||
@ -60,7 +60,7 @@ impl Timeouts {
|
||||
#[async_trait(?Send)]
|
||||
impl SyncServer for HttpSyncClient {
|
||||
async fn meta(&self) -> Result<SyncMeta> {
|
||||
let input = SyncRequest::Meta(MetaIn {
|
||||
let input = SyncRequest::Meta(MetaRequest {
|
||||
sync_version: SYNC_VERSION_MAX,
|
||||
client_version: sync_client_version().to_string(),
|
||||
});
|
||||
@ -73,7 +73,7 @@ impl SyncServer for HttpSyncClient {
|
||||
local_is_newer: bool,
|
||||
deprecated_client_graves: Option<Graves>,
|
||||
) -> Result<Graves> {
|
||||
let input = SyncRequest::Start(StartIn {
|
||||
let input = SyncRequest::Start(StartRequest {
|
||||
client_usn,
|
||||
local_is_newer,
|
||||
deprecated_client_graves,
|
||||
@ -82,12 +82,12 @@ impl SyncServer for HttpSyncClient {
|
||||
}
|
||||
|
||||
async fn apply_graves(&mut self, chunk: Graves) -> Result<()> {
|
||||
let input = SyncRequest::ApplyGraves(ApplyGravesIn { chunk });
|
||||
let input = SyncRequest::ApplyGraves(ApplyGravesRequest { chunk });
|
||||
self.json_request(input).await
|
||||
}
|
||||
|
||||
async fn apply_changes(&mut self, changes: UnchunkedChanges) -> Result<UnchunkedChanges> {
|
||||
let input = SyncRequest::ApplyChanges(ApplyChangesIn { changes });
|
||||
let input = SyncRequest::ApplyChanges(ApplyChangesRequest { changes });
|
||||
self.json_request(input).await
|
||||
}
|
||||
|
||||
@ -97,12 +97,12 @@ impl SyncServer for HttpSyncClient {
|
||||
}
|
||||
|
||||
async fn apply_chunk(&mut self, chunk: Chunk) -> Result<()> {
|
||||
let input = SyncRequest::ApplyChunk(ApplyChunkIn { chunk });
|
||||
let input = SyncRequest::ApplyChunk(ApplyChunkRequest { chunk });
|
||||
self.json_request(input).await
|
||||
}
|
||||
|
||||
async fn sanity_check(&mut self, client: SanityCheckCounts) -> Result<SanityCheckOut> {
|
||||
let input = SyncRequest::SanityCheck(SanityCheckIn { client });
|
||||
async fn sanity_check(&mut self, client: SanityCheckCounts) -> Result<SanityCheckResponse> {
|
||||
let input = SyncRequest::SanityCheck(SanityCheckRequest { client });
|
||||
self.json_request(input).await
|
||||
}
|
||||
|
||||
@ -249,11 +249,11 @@ impl HttpSyncClient {
|
||||
}
|
||||
|
||||
pub(crate) async fn login<S: Into<String>>(&mut self, username: S, password: S) -> Result<()> {
|
||||
let input = SyncRequest::HostKey(HostKeyIn {
|
||||
let input = SyncRequest::HostKey(HostKeyRequest {
|
||||
username: username.into(),
|
||||
password: password.into(),
|
||||
});
|
||||
let output: HostKeyOut = self.json_request(input).await?;
|
||||
let output: HostKeyResponse = self.json_request(input).await?;
|
||||
self.hkey = Some(output.key);
|
||||
|
||||
Ok(())
|
||||
|
@ -16,7 +16,7 @@ use serde_tuple::Serialize_tuple;
|
||||
pub(crate) use server::{LocalServer, SyncServer};
|
||||
|
||||
use crate::{
|
||||
backend_proto::{sync_status_out, SyncStatusOut},
|
||||
backend_proto::{sync_status_response, SyncStatusResponse},
|
||||
card::{Card, CardQueue, CardType},
|
||||
deckconfig::DeckConfSchema11,
|
||||
decks::DeckSchema11,
|
||||
@ -164,7 +164,7 @@ pub struct CardEntry {
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct SanityCheckOut {
|
||||
pub struct SanityCheckResponse {
|
||||
pub status: SanityCheckStatus,
|
||||
#[serde(rename = "c", default, deserialize_with = "default_on_invalid")]
|
||||
pub client: Option<SanityCheckCounts>,
|
||||
@ -545,7 +545,7 @@ where
|
||||
self.col.log,
|
||||
"gathered local counts; waiting for server reply"
|
||||
);
|
||||
let out: SanityCheckOut = self.remote.sanity_check(local_counts).await?;
|
||||
let out: SanityCheckResponse = self.remote.sanity_check(local_counts).await?;
|
||||
debug!(self.col.log, "got server reply");
|
||||
if out.status != SanityCheckStatus::Ok {
|
||||
Err(AnkiError::sync_error(
|
||||
@ -609,20 +609,20 @@ pub(crate) async fn get_remote_sync_meta(auth: SyncAuth) -> Result<SyncMeta> {
|
||||
}
|
||||
|
||||
impl Collection {
|
||||
pub fn get_local_sync_status(&mut self) -> Result<sync_status_out::Required> {
|
||||
pub fn get_local_sync_status(&mut self) -> Result<sync_status_response::Required> {
|
||||
let stamps = self.storage.get_collection_timestamps()?;
|
||||
let required = if stamps.schema_changed_since_sync() {
|
||||
sync_status_out::Required::FullSync
|
||||
sync_status_response::Required::FullSync
|
||||
} else if stamps.collection_changed_since_sync() {
|
||||
sync_status_out::Required::NormalSync
|
||||
sync_status_response::Required::NormalSync
|
||||
} else {
|
||||
sync_status_out::Required::NoChanges
|
||||
sync_status_response::Required::NoChanges
|
||||
};
|
||||
|
||||
Ok(required)
|
||||
}
|
||||
|
||||
pub fn get_sync_status(&self, remote: SyncMeta) -> Result<sync_status_out::Required> {
|
||||
pub fn get_sync_status(&self, remote: SyncMeta) -> Result<sync_status_response::Required> {
|
||||
Ok(self.sync_meta()?.compared_to_remote(remote).required.into())
|
||||
}
|
||||
|
||||
@ -1173,18 +1173,18 @@ impl From<SyncState> for SyncOutput {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<sync_status_out::Required> for SyncStatusOut {
|
||||
fn from(r: sync_status_out::Required) -> Self {
|
||||
SyncStatusOut { required: r.into() }
|
||||
impl From<sync_status_response::Required> for SyncStatusResponse {
|
||||
fn from(r: sync_status_response::Required) -> Self {
|
||||
SyncStatusResponse { required: r.into() }
|
||||
}
|
||||
}
|
||||
|
||||
impl From<SyncActionRequired> for sync_status_out::Required {
|
||||
impl From<SyncActionRequired> for sync_status_response::Required {
|
||||
fn from(r: SyncActionRequired) -> Self {
|
||||
match r {
|
||||
SyncActionRequired::NoChanges => sync_status_out::Required::NoChanges,
|
||||
SyncActionRequired::FullSyncRequired { .. } => sync_status_out::Required::FullSync,
|
||||
SyncActionRequired::NormalSyncRequired => sync_status_out::Required::NormalSync,
|
||||
SyncActionRequired::NoChanges => sync_status_response::Required::NoChanges,
|
||||
SyncActionRequired::FullSyncRequired { .. } => sync_status_response::Required::FullSync,
|
||||
SyncActionRequired::NormalSyncRequired => sync_status_response::Required::NormalSync,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1415,7 +1415,7 @@ mod test {
|
||||
// and sync our changes
|
||||
let remote_meta = ctx.server().meta().await.unwrap();
|
||||
let out = col1.get_sync_status(remote_meta)?;
|
||||
assert_eq!(out, sync_status_out::Required::NormalSync);
|
||||
assert_eq!(out, sync_status_response::Required::NormalSync);
|
||||
|
||||
let out = ctx.normal_sync(&mut col1).await;
|
||||
assert_eq!(out.required, SyncActionRequired::NoChanges);
|
||||
|
@ -11,7 +11,7 @@ use crate::{
|
||||
prelude::*,
|
||||
storage::open_and_check_sqlite_file,
|
||||
sync::{
|
||||
Chunk, Graves, SanityCheckCounts, SanityCheckOut, SanityCheckStatus, SyncMeta,
|
||||
Chunk, Graves, SanityCheckCounts, SanityCheckResponse, SanityCheckStatus, SyncMeta,
|
||||
UnchunkedChanges, Usn,
|
||||
},
|
||||
};
|
||||
@ -29,7 +29,7 @@ pub trait SyncServer {
|
||||
-> Result<UnchunkedChanges>;
|
||||
async fn chunk(&mut self) -> Result<Chunk>;
|
||||
async fn apply_chunk(&mut self, client_chunk: Chunk) -> Result<()>;
|
||||
async fn sanity_check(&mut self, client: SanityCheckCounts) -> Result<SanityCheckOut>;
|
||||
async fn sanity_check(&mut self, client: SanityCheckCounts) -> Result<SanityCheckResponse>;
|
||||
async fn finish(&mut self) -> Result<TimestampMillis>;
|
||||
async fn abort(&mut self) -> Result<()>;
|
||||
|
||||
@ -148,10 +148,10 @@ impl SyncServer for LocalServer {
|
||||
self.col.apply_chunk(client_chunk, self.client_usn)
|
||||
}
|
||||
|
||||
async fn sanity_check(&mut self, mut client: SanityCheckCounts) -> Result<SanityCheckOut> {
|
||||
async fn sanity_check(&mut self, mut client: SanityCheckCounts) -> Result<SanityCheckResponse> {
|
||||
client.counts = Default::default();
|
||||
let server = self.col.storage.sanity_check_info()?;
|
||||
Ok(SanityCheckOut {
|
||||
Ok(SanityCheckResponse {
|
||||
status: if client == server {
|
||||
SanityCheckStatus::Ok
|
||||
} else {
|
||||
|
@ -29,9 +29,10 @@ export async function getChangeNotetypeInfo(
|
||||
}
|
||||
|
||||
export async function changeNotetype(
|
||||
input: pb.BackendProto.ChangeNotetypeIn
|
||||
input: pb.BackendProto.ChangeNotetypeRequest
|
||||
): Promise<void> {
|
||||
const data: Uint8Array = pb.BackendProto.ChangeNotetypeIn.encode(input).finish();
|
||||
const data: Uint8Array =
|
||||
pb.BackendProto.ChangeNotetypeRequest.encode(input).finish();
|
||||
await postRequest("/_anki/changeNotetype", data);
|
||||
return;
|
||||
}
|
||||
@ -113,13 +114,13 @@ export class ChangeNotetypeInfoWrapper {
|
||||
);
|
||||
}
|
||||
|
||||
input(): pb.BackendProto.ChangeNotetypeIn {
|
||||
return this.info.input as pb.BackendProto.ChangeNotetypeIn;
|
||||
input(): pb.BackendProto.ChangeNotetypeRequest {
|
||||
return this.info.input as pb.BackendProto.ChangeNotetypeRequest;
|
||||
}
|
||||
|
||||
/// Pack changes back into input message for saving.
|
||||
intoInput(): pb.BackendProto.ChangeNotetypeIn {
|
||||
const input = this.info.input as pb.BackendProto.ChangeNotetypeIn;
|
||||
intoInput(): pb.BackendProto.ChangeNotetypeRequest {
|
||||
const input = this.info.input as pb.BackendProto.ChangeNotetypeRequest;
|
||||
input.newFields = nullToNegativeOne(this.fields);
|
||||
if (this.templates) {
|
||||
input.newTemplates = nullToNegativeOne(this.templates);
|
||||
@ -202,7 +203,7 @@ export class ChangeNotetypeState {
|
||||
await changeNotetype(this.dataForSaving());
|
||||
}
|
||||
|
||||
dataForSaving(): pb.BackendProto.ChangeNotetypeIn {
|
||||
dataForSaving(): pb.BackendProto.ChangeNotetypeRequest {
|
||||
return this.info_.intoInput();
|
||||
}
|
||||
|
||||
|
@ -7,7 +7,7 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
||||
import { buildNextLearnMsg } from "./lib";
|
||||
import { bridgeLink } from "lib/bridgecommand";
|
||||
|
||||
export let info: pb.BackendProto.CongratsInfoOut;
|
||||
export let info: pb.BackendProto.CongratsInfoResponse;
|
||||
import * as tr from "lib/i18n";
|
||||
|
||||
const congrats = tr.schedulingCongratulationsFinished();
|
||||
|
@ -7,13 +7,13 @@ import { naturalUnit, unitAmount, unitName } from "lib/time";
|
||||
|
||||
import * as tr from "lib/i18n";
|
||||
|
||||
export async function getCongratsInfo(): Promise<pb.BackendProto.CongratsInfoOut> {
|
||||
return pb.BackendProto.CongratsInfoOut.decode(
|
||||
export async function getCongratsInfo(): Promise<pb.BackendProto.CongratsInfoResponse> {
|
||||
return pb.BackendProto.CongratsInfoResponse.decode(
|
||||
await postRequest("/_anki/congratsInfo", "")
|
||||
);
|
||||
}
|
||||
|
||||
export function buildNextLearnMsg(info: pb.BackendProto.CongratsInfoOut): string {
|
||||
export function buildNextLearnMsg(info: pb.BackendProto.CongratsInfoResponse): string {
|
||||
const secsUntil = info.secsUntilNextLearn;
|
||||
// next learning card not due (/ until tomorrow)?
|
||||
if (secsUntil == 0 || secsUntil > 86_400) {
|
||||
|
@ -21,9 +21,10 @@ export async function getDeckOptionsInfo(
|
||||
}
|
||||
|
||||
export async function saveDeckOptions(
|
||||
input: pb.BackendProto.UpdateDeckConfigsIn
|
||||
input: pb.BackendProto.UpdateDeckConfigsRequest
|
||||
): Promise<void> {
|
||||
const data: Uint8Array = pb.BackendProto.UpdateDeckConfigsIn.encode(input).finish();
|
||||
const data: Uint8Array =
|
||||
pb.BackendProto.UpdateDeckConfigsRequest.encode(input).finish();
|
||||
await postRequest("/_anki/updateDeckConfigs", data);
|
||||
return;
|
||||
}
|
||||
@ -190,7 +191,7 @@ export class DeckOptionsState {
|
||||
this.updateConfigList();
|
||||
}
|
||||
|
||||
dataForSaving(applyToChildren: boolean): pb.BackendProto.UpdateDeckConfigsIn {
|
||||
dataForSaving(applyToChildren: boolean): pb.BackendProto.UpdateDeckConfigsRequest {
|
||||
const modifiedConfigsExcludingCurrent = this.configs
|
||||
.map((c) => c.config)
|
||||
.filter((c, idx) => {
|
||||
@ -204,7 +205,7 @@ export class DeckOptionsState {
|
||||
// current must come last, even if unmodified
|
||||
this.configs[this.selectedIdx].config,
|
||||
];
|
||||
return pb.BackendProto.UpdateDeckConfigsIn.create({
|
||||
return pb.BackendProto.UpdateDeckConfigsRequest.create({
|
||||
targetDeckId: this.targetDeckId,
|
||||
removedConfigIds: this.removedConfigs,
|
||||
configs,
|
||||
|
@ -19,7 +19,7 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
||||
import { gatherData, buildHistogram } from "./added";
|
||||
import type { GraphData } from "./added";
|
||||
|
||||
export let sourceData: pb.BackendProto.GraphsOut | null = null;
|
||||
export let sourceData: pb.BackendProto.GraphsResponse | null = null;
|
||||
import * as tr from "lib/i18n";
|
||||
export let preferences: PreferenceStore<pb.BackendProto.GraphPreferences>;
|
||||
|
||||
|
@ -14,7 +14,7 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
||||
import { renderButtons } from "./buttons";
|
||||
import { defaultGraphBounds, GraphRange, RevlogRange } from "./graph-helpers";
|
||||
|
||||
export let sourceData: pb.BackendProto.GraphsOut | null = null;
|
||||
export let sourceData: pb.BackendProto.GraphsResponse | null = null;
|
||||
import * as tr from "lib/i18n";
|
||||
export let revlogRange: RevlogRange;
|
||||
|
||||
|
@ -18,7 +18,7 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
||||
import { gatherData, renderCalendar } from "./calendar";
|
||||
import type { GraphData } from "./calendar";
|
||||
|
||||
export let sourceData: pb.BackendProto.GraphsOut;
|
||||
export let sourceData: pb.BackendProto.GraphsResponse;
|
||||
export let preferences: PreferenceStore<pb.BackendProto.GraphPreferences>;
|
||||
export let revlogRange: RevlogRange;
|
||||
import * as tr from "lib/i18n";
|
||||
|
@ -15,7 +15,7 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
||||
import { gatherData, renderCards } from "./card-counts";
|
||||
import type { GraphData, TableDatum } from "./card-counts";
|
||||
|
||||
export let sourceData: pb.BackendProto.GraphsOut;
|
||||
export let sourceData: pb.BackendProto.GraphsResponse;
|
||||
import * as tr2 from "lib/i18n";
|
||||
export let preferences: PreferenceStore<pb.BackendProto.GraphPreferences>;
|
||||
|
||||
|
@ -17,7 +17,7 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
||||
import { gatherData, prepareData } from "./ease";
|
||||
import type { TableDatum, SearchEventMap } from "./graph-helpers";
|
||||
|
||||
export let sourceData: pb.BackendProto.GraphsOut | null = null;
|
||||
export let sourceData: pb.BackendProto.GraphsResponse | null = null;
|
||||
export let preferences: PreferenceStore<pb.BackendProto.GraphPreferences>;
|
||||
|
||||
const dispatch = createEventDispatcher<SearchEventMap>();
|
||||
|
@ -20,7 +20,7 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
||||
import { gatherData, buildHistogram } from "./future-due";
|
||||
import type { GraphData } from "./future-due";
|
||||
|
||||
export let sourceData: pb.BackendProto.GraphsOut | null = null;
|
||||
export let sourceData: pb.BackendProto.GraphsResponse | null = null;
|
||||
import * as tr from "lib/i18n";
|
||||
export let preferences: PreferenceStore<pb.BackendProto.GraphPreferences>;
|
||||
|
||||
|
@ -15,7 +15,7 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
||||
import { defaultGraphBounds, RevlogRange, GraphRange } from "./graph-helpers";
|
||||
import { renderHours } from "./hours";
|
||||
|
||||
export let sourceData: pb.BackendProto.GraphsOut | null = null;
|
||||
export let sourceData: pb.BackendProto.GraphsResponse | null = null;
|
||||
import * as tr from "lib/i18n";
|
||||
export let revlogRange: RevlogRange;
|
||||
let graphRange: GraphRange = GraphRange.Year;
|
||||
|
@ -23,7 +23,7 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
||||
import type { IntervalGraphData } from "./intervals";
|
||||
import type { TableDatum, SearchEventMap } from "./graph-helpers";
|
||||
|
||||
export let sourceData: pb.BackendProto.GraphsOut | null = null;
|
||||
export let sourceData: pb.BackendProto.GraphsResponse | null = null;
|
||||
import * as tr from "lib/i18n";
|
||||
export let preferences: PreferenceStore<pb.BackendProto.GraphPreferences>;
|
||||
|
||||
|
@ -19,7 +19,7 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
||||
import { gatherData, renderReviews } from "./reviews";
|
||||
import type { GraphData } from "./reviews";
|
||||
|
||||
export let sourceData: pb.BackendProto.GraphsOut | null = null;
|
||||
export let sourceData: pb.BackendProto.GraphsResponse | null = null;
|
||||
export let revlogRange: RevlogRange;
|
||||
import * as tr from "lib/i18n";
|
||||
|
||||
|
@ -10,7 +10,7 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
||||
import type { TodayData } from "./today";
|
||||
import { gatherData } from "./today";
|
||||
|
||||
export let sourceData: pb.BackendProto.GraphsOut | null = null;
|
||||
export let sourceData: pb.BackendProto.GraphsResponse | null = null;
|
||||
|
||||
let todayData: TodayData | null = null;
|
||||
$: if (sourceData) {
|
||||
|
@ -21,8 +21,8 @@ License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
||||
async function getGraphData(
|
||||
search: string,
|
||||
days: number
|
||||
): Promise<pb.BackendProto.GraphsOut> {
|
||||
return pb.BackendProto.GraphsOut.decode(
|
||||
): Promise<pb.BackendProto.GraphsResponse> {
|
||||
return pb.BackendProto.GraphsResponse.decode(
|
||||
await postRequest("/_anki/graphData", JSON.stringify({ search, days }))
|
||||
);
|
||||
}
|
||||
|
@ -28,7 +28,7 @@ export interface GraphData {
|
||||
daysAdded: number[];
|
||||
}
|
||||
|
||||
export function gatherData(data: pb.BackendProto.GraphsOut): GraphData {
|
||||
export function gatherData(data: pb.BackendProto.GraphsResponse): GraphData {
|
||||
const daysAdded = (data.cards as pb.BackendProto.Card[]).map((card) => {
|
||||
const elapsedSecs = (card.id as number) / 1000 - data.nextDayAtSecs;
|
||||
return Math.ceil(elapsedSecs / 86400);
|
||||
|
@ -39,7 +39,7 @@ export interface GraphData {
|
||||
const ReviewKind = pb.BackendProto.RevlogEntry.ReviewKind;
|
||||
|
||||
export function gatherData(
|
||||
data: pb.BackendProto.GraphsOut,
|
||||
data: pb.BackendProto.GraphsResponse,
|
||||
range: GraphRange
|
||||
): GraphData {
|
||||
const cutoff = millisecondCutoffForRange(range, data.nextDayAtSecs);
|
||||
@ -99,7 +99,7 @@ interface TotalCorrect {
|
||||
export function renderButtons(
|
||||
svgElem: SVGElement,
|
||||
bounds: GraphBounds,
|
||||
origData: pb.BackendProto.GraphsOut,
|
||||
origData: pb.BackendProto.GraphsResponse,
|
||||
range: GraphRange
|
||||
): void {
|
||||
const sourceData = gatherData(origData, range);
|
||||
|
@ -53,7 +53,7 @@ type WeekdayType = pb.BackendProto.GraphPreferences.Weekday;
|
||||
const Weekday = pb.BackendProto.GraphPreferences.Weekday; /* enum */
|
||||
|
||||
export function gatherData(
|
||||
data: pb.BackendProto.GraphsOut,
|
||||
data: pb.BackendProto.GraphsResponse,
|
||||
firstDayOfWeek: WeekdayType
|
||||
): GraphData {
|
||||
const reviewCount = new Map<number, number>();
|
||||
|
@ -127,7 +127,7 @@ function countCards(
|
||||
}
|
||||
|
||||
export function gatherData(
|
||||
data: pb.BackendProto.GraphsOut,
|
||||
data: pb.BackendProto.GraphsResponse,
|
||||
separateInactive: boolean
|
||||
): GraphData {
|
||||
const totalCards = data.cards.length;
|
||||
|
@ -26,7 +26,7 @@ export interface GraphData {
|
||||
eases: number[];
|
||||
}
|
||||
|
||||
export function gatherData(data: pb.BackendProto.GraphsOut): GraphData {
|
||||
export function gatherData(data: pb.BackendProto.GraphsResponse): GraphData {
|
||||
const eases = (data.cards as pb.BackendProto.Card[])
|
||||
.filter((c) => [CardType.Review, CardType.Relearn].includes(c.ctype))
|
||||
.map((c) => c.easeFactor / 10);
|
||||
|
@ -30,7 +30,7 @@ export interface GraphData {
|
||||
haveBacklog: boolean;
|
||||
}
|
||||
|
||||
export function gatherData(data: pb.BackendProto.GraphsOut): GraphData {
|
||||
export function gatherData(data: pb.BackendProto.GraphsResponse): GraphData {
|
||||
const isLearning = (card: pb.BackendProto.Card): boolean =>
|
||||
[CardQueue.Learn, CardQueue.PreviewRepeat].includes(card.queue);
|
||||
|
||||
@ -75,7 +75,7 @@ function binValue(d: Bin<Map<number, number>, number>): number {
|
||||
return sum(d, (d) => d[1]);
|
||||
}
|
||||
|
||||
export interface FutureDueOut {
|
||||
export interface FutureDueResponse {
|
||||
histogramData: HistogramData | null;
|
||||
tableData: TableDatum[];
|
||||
}
|
||||
@ -96,7 +96,7 @@ export function buildHistogram(
|
||||
backlog: boolean,
|
||||
dispatch: SearchDispatch,
|
||||
browserLinksSupported: boolean
|
||||
): FutureDueOut {
|
||||
): FutureDueResponse {
|
||||
const output = { histogramData: null, tableData: [] };
|
||||
// get min/max
|
||||
const data = sourceData.dueCounts;
|
||||
|
@ -39,7 +39,7 @@ interface Hour {
|
||||
|
||||
const ReviewKind = pb.BackendProto.RevlogEntry.ReviewKind;
|
||||
|
||||
function gatherData(data: pb.BackendProto.GraphsOut, range: GraphRange): Hour[] {
|
||||
function gatherData(data: pb.BackendProto.GraphsResponse, range: GraphRange): Hour[] {
|
||||
const hours = [...Array(24)].map((_n, idx: number) => {
|
||||
return { hour: idx, totalCount: 0, correctCount: 0 } as Hour;
|
||||
});
|
||||
@ -74,7 +74,7 @@ function gatherData(data: pb.BackendProto.GraphsOut, range: GraphRange): Hour[]
|
||||
export function renderHours(
|
||||
svgElem: SVGElement,
|
||||
bounds: GraphBounds,
|
||||
origData: pb.BackendProto.GraphsOut,
|
||||
origData: pb.BackendProto.GraphsResponse,
|
||||
range: GraphRange
|
||||
): void {
|
||||
const data = gatherData(origData, range);
|
||||
|
@ -36,7 +36,9 @@ export enum IntervalRange {
|
||||
All = 3,
|
||||
}
|
||||
|
||||
export function gatherIntervalData(data: pb.BackendProto.GraphsOut): IntervalGraphData {
|
||||
export function gatherIntervalData(
|
||||
data: pb.BackendProto.GraphsResponse
|
||||
): IntervalGraphData {
|
||||
const intervals = (data.cards as pb.BackendProto.Card[])
|
||||
.filter((c) => [CardType.Review, CardType.Relearn].includes(c.ctype))
|
||||
.map((c) => c.interval);
|
||||
|
@ -53,7 +53,7 @@ export interface GraphData {
|
||||
const ReviewKind = pb.BackendProto.RevlogEntry.ReviewKind;
|
||||
type BinType = Bin<Map<number, Reviews[]>, number>;
|
||||
|
||||
export function gatherData(data: pb.BackendProto.GraphsOut): GraphData {
|
||||
export function gatherData(data: pb.BackendProto.GraphsResponse): GraphData {
|
||||
const reviewCount = new Map<number, Reviews>();
|
||||
const reviewTime = new Map<number, Reviews>();
|
||||
const empty = { mature: 0, young: 0, learn: 0, relearn: 0, early: 0 };
|
||||
|
@ -13,7 +13,7 @@ export interface TodayData {
|
||||
|
||||
const ReviewKind = pb.BackendProto.RevlogEntry.ReviewKind;
|
||||
|
||||
export function gatherData(data: pb.BackendProto.GraphsOut): TodayData {
|
||||
export function gatherData(data: pb.BackendProto.GraphsResponse): TodayData {
|
||||
let answerCount = 0;
|
||||
let answerMillis = 0;
|
||||
let correctCount = 0;
|
||||
|
Loading…
Reference in New Issue
Block a user