Merge branch 'master' into top_toolbar_links_hook
This commit is contained in:
commit
33f7d7ed50
@ -25,6 +25,7 @@ zjosua <zjosua@hotmail.com>
|
||||
Arthur Milchior <arthur@milchior.fr>
|
||||
Yngve Hoiseth <yngve@hoiseth.net>
|
||||
Ijgnd
|
||||
Yoonchae Lee
|
||||
|
||||
********************
|
||||
|
||||
|
12
Makefile
12
Makefile
@ -8,8 +8,9 @@ SUBMAKE := $(MAKE) --print-directory
|
||||
|
||||
BUILDFLAGS := --release --strip
|
||||
RUNFLAGS :=
|
||||
CHECKABLE := rslib pylib qt
|
||||
DEVEL := rspy pylib qt
|
||||
CHECKABLE_PY := pylib qt
|
||||
CHECKABLE_RS := rslib
|
||||
DEVEL := rslib rspy pylib qt
|
||||
|
||||
.PHONY: all
|
||||
all: run
|
||||
@ -80,10 +81,13 @@ clean-dist:
|
||||
.PHONY: check
|
||||
check: pyenv buildhash
|
||||
@set -e && \
|
||||
for dir in $(CHECKABLE_RS); do \
|
||||
$(SUBMAKE) -C $$dir check; \
|
||||
done; \
|
||||
. pyenv/bin/activate && \
|
||||
$(SUBMAKE) -C rspy develop && \
|
||||
$(SUBMAKE) -C pylib develop && \
|
||||
for dir in $(CHECKABLE); do \
|
||||
for dir in $(CHECKABLE_PY); do \
|
||||
$(SUBMAKE) -C $$dir check; \
|
||||
done;
|
||||
@echo
|
||||
@ -93,7 +97,7 @@ check: pyenv buildhash
|
||||
fix:
|
||||
@set -e && \
|
||||
. pyenv/bin/activate && \
|
||||
for dir in $(CHECKABLE); do \
|
||||
for dir in $(CHECKABLE_RS) $(CHECKABLE_PY); do \
|
||||
$(SUBMAKE) -C $$dir fix; \
|
||||
done; \
|
||||
|
||||
|
@ -1 +1 @@
|
||||
2.1.20
|
||||
2.1.21
|
||||
|
@ -4,6 +4,25 @@ package backend_proto;
|
||||
|
||||
message Empty {}
|
||||
|
||||
message BackendInit {
|
||||
string collection_path = 1;
|
||||
string media_folder_path = 2;
|
||||
string media_db_path = 3;
|
||||
repeated string preferred_langs = 4;
|
||||
string locale_folder_path = 5;
|
||||
}
|
||||
|
||||
enum StringsGroup {
|
||||
OTHER = 0;
|
||||
TEST = 1;
|
||||
MEDIA_CHECK = 2;
|
||||
CARD_TEMPLATES = 3;
|
||||
SYNC = 4;
|
||||
NETWORK = 5;
|
||||
STATISTICS = 6;
|
||||
FILTERING = 7;
|
||||
}
|
||||
|
||||
// 1-15 reserved for future use; 2047 for errors
|
||||
|
||||
message BackendInput {
|
||||
@ -18,7 +37,12 @@ message BackendInput {
|
||||
int64 local_minutes_west = 22;
|
||||
string strip_av_tags = 23;
|
||||
ExtractAVTagsIn extract_av_tags = 24;
|
||||
string expand_clozes_to_reveal_latex = 25;
|
||||
ExtractLatexIn extract_latex = 25;
|
||||
AddMediaFileIn add_media_file = 26;
|
||||
SyncMediaIn sync_media = 27;
|
||||
Empty check_media = 28;
|
||||
TrashMediaFilesIn trash_media_files = 29;
|
||||
TranslateStringIn translate_string = 30;
|
||||
}
|
||||
}
|
||||
|
||||
@ -33,7 +57,12 @@ message BackendOutput {
|
||||
sint32 local_minutes_west = 22;
|
||||
string strip_av_tags = 23;
|
||||
ExtractAVTagsOut extract_av_tags = 24;
|
||||
string expand_clozes_to_reveal_latex = 25;
|
||||
ExtractLatexOut extract_latex = 25;
|
||||
string add_media_file = 26;
|
||||
Empty sync_media = 27;
|
||||
MediaCheckOut check_media = 28;
|
||||
Empty trash_media_files = 29;
|
||||
string translate_string = 30;
|
||||
|
||||
BackendError error = 2047;
|
||||
}
|
||||
@ -41,18 +70,69 @@ message BackendOutput {
|
||||
|
||||
message BackendError {
|
||||
oneof value {
|
||||
InvalidInputError invalid_input = 1;
|
||||
StringError invalid_input = 1;
|
||||
TemplateParseError template_parse = 2;
|
||||
StringError io_error = 3;
|
||||
StringError db_error = 4;
|
||||
NetworkError network_error = 5;
|
||||
SyncError sync_error = 6;
|
||||
// user interrupted operation
|
||||
Empty interrupted = 8;
|
||||
}
|
||||
}
|
||||
|
||||
message InvalidInputError {
|
||||
message Progress {
|
||||
oneof value {
|
||||
MediaSyncProgress media_sync = 1;
|
||||
string media_check = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message StringError {
|
||||
string info = 1;
|
||||
}
|
||||
|
||||
message TemplateParseError {
|
||||
string info = 1;
|
||||
bool q_side = 2;
|
||||
}
|
||||
|
||||
message NetworkError {
|
||||
string info = 1;
|
||||
enum NetworkErrorKind {
|
||||
OTHER = 0;
|
||||
OFFLINE = 1;
|
||||
TIMEOUT = 2;
|
||||
PROXY_AUTH = 3;
|
||||
}
|
||||
NetworkErrorKind kind = 2;
|
||||
string localized = 3;
|
||||
}
|
||||
|
||||
message SyncError {
|
||||
string info = 1;
|
||||
enum SyncErrorKind {
|
||||
OTHER = 0;
|
||||
CONFLICT = 1;
|
||||
SERVER_ERROR = 2;
|
||||
CLIENT_TOO_OLD = 3;
|
||||
AUTH_FAILED = 4;
|
||||
SERVER_MESSAGE = 5;
|
||||
MEDIA_CHECK_REQUIRED = 6;
|
||||
RESYNC_REQUIRED = 7;
|
||||
}
|
||||
SyncErrorKind kind = 2;
|
||||
string localized = 3;
|
||||
}
|
||||
|
||||
message MediaSyncProgress {
|
||||
string checked = 1;
|
||||
string added = 2;
|
||||
string removed = 3;
|
||||
}
|
||||
|
||||
message MediaSyncUploadProgress {
|
||||
uint32 files = 1;
|
||||
uint32 deletions = 2;
|
||||
}
|
||||
|
||||
message TemplateRequirementsIn {
|
||||
@ -174,3 +254,51 @@ message TTSTag {
|
||||
float speed = 4;
|
||||
repeated string other_args = 5;
|
||||
}
|
||||
|
||||
message ExtractLatexIn {
|
||||
string text = 1;
|
||||
bool svg = 2;
|
||||
}
|
||||
|
||||
message ExtractLatexOut {
|
||||
string text = 1;
|
||||
repeated ExtractedLatex latex = 2;
|
||||
}
|
||||
|
||||
message ExtractedLatex {
|
||||
string filename = 1;
|
||||
string latex_body = 2;
|
||||
}
|
||||
|
||||
message AddMediaFileIn {
|
||||
string desired_name = 1;
|
||||
bytes data = 2;
|
||||
}
|
||||
|
||||
message SyncMediaIn {
|
||||
string hkey = 1;
|
||||
string endpoint = 2;
|
||||
}
|
||||
|
||||
message MediaCheckOut {
|
||||
repeated string unused = 1;
|
||||
repeated string missing = 2;
|
||||
string report = 3;
|
||||
}
|
||||
|
||||
message TrashMediaFilesIn {
|
||||
repeated string fnames = 1;
|
||||
}
|
||||
|
||||
message TranslateStringIn {
|
||||
StringsGroup group = 1;
|
||||
string key = 2;
|
||||
map<string,TranslateArgValue> args = 3;
|
||||
}
|
||||
|
||||
message TranslateArgValue {
|
||||
oneof value {
|
||||
string str = 1;
|
||||
string number = 2;
|
||||
}
|
||||
}
|
||||
|
@ -48,8 +48,8 @@ class Card:
|
||||
self.id = timestampID(col.db, "cards")
|
||||
self.did = 1
|
||||
self.crt = intTime()
|
||||
self.type = 0
|
||||
self.queue = 0
|
||||
self.type = CARD_TYPE_NEW
|
||||
self.queue = QUEUE_TYPE_NEW
|
||||
self.ivl = 0
|
||||
self.factor = 0
|
||||
self.reps = 0
|
||||
@ -84,13 +84,21 @@ class Card:
|
||||
self._render_output = None
|
||||
self._note = None
|
||||
|
||||
def flush(self) -> None:
|
||||
def _preFlush(self) -> None:
|
||||
hooks.card_will_flush(self)
|
||||
self.mod = intTime()
|
||||
self.usn = self.col.usn()
|
||||
# bug check
|
||||
if self.queue == 2 and self.odue and not self.col.decks.isDyn(self.did):
|
||||
if (
|
||||
self.queue == QUEUE_TYPE_REV
|
||||
and self.odue
|
||||
and not self.col.decks.isDyn(self.did)
|
||||
):
|
||||
hooks.card_odue_was_invalid()
|
||||
assert self.due < 4294967296
|
||||
|
||||
def flush(self) -> None:
|
||||
self._preFlush()
|
||||
self.col.db.execute(
|
||||
"""
|
||||
insert or replace into cards values
|
||||
@ -117,12 +125,8 @@ insert or replace into cards values
|
||||
self.col.log(self)
|
||||
|
||||
def flushSched(self) -> None:
|
||||
self.mod = intTime()
|
||||
self.usn = self.col.usn()
|
||||
self._preFlush()
|
||||
# bug checks
|
||||
if self.queue == 2 and self.odue and not self.col.decks.isDyn(self.did):
|
||||
hooks.card_odue_was_invalid()
|
||||
assert self.due < 4294967296
|
||||
self.col.db.execute(
|
||||
"""update cards set
|
||||
mod=?, usn=?, type=?, queue=?, due=?, ivl=?, factor=?, reps=?,
|
||||
|
@ -14,6 +14,22 @@ NEW_CARDS_FIRST = 2
|
||||
NEW_CARDS_RANDOM = 0
|
||||
NEW_CARDS_DUE = 1
|
||||
|
||||
# Queue types
|
||||
QUEUE_TYPE_MANUALLY_BURIED = -3
|
||||
QUEUE_TYPE_SIBLING_BURIED = -2
|
||||
QUEUE_TYPE_SUSPENDED = -1
|
||||
QUEUE_TYPE_NEW = 0
|
||||
QUEUE_TYPE_LRN = 1
|
||||
QUEUE_TYPE_REV = 2
|
||||
QUEUE_TYPE_DAY_LEARN_RELEARN = 3
|
||||
QUEUE_TYPE_PREVIEW = 4
|
||||
|
||||
# Card types
|
||||
CARD_TYPE_NEW = 0
|
||||
CARD_TYPE_LRN = 1
|
||||
CARD_TYPE_REV = 2
|
||||
CARD_TYPE_RELEARNING = 3
|
||||
|
||||
# removal types
|
||||
REM_CARD = 0
|
||||
REM_NOTE = 1
|
||||
@ -27,6 +43,10 @@ COUNT_REMAINING = 1
|
||||
MEDIA_ADD = 0
|
||||
MEDIA_REM = 1
|
||||
|
||||
# Kind of decks
|
||||
DECK_STD = 0
|
||||
DECK_DYN = 1
|
||||
|
||||
# dynamic deck order
|
||||
DYN_OLDEST = 0
|
||||
DYN_RANDOM = 1
|
||||
@ -55,6 +75,22 @@ SYNC_VER = 9
|
||||
|
||||
HELP_SITE = "http://ankisrs.net/docs/manual.html"
|
||||
|
||||
# Leech actions
|
||||
LEECH_SUSPEND = 0
|
||||
LEECH_TAGONLY = 1
|
||||
|
||||
# Buttons
|
||||
BUTTON_ONE = 1
|
||||
BUTTON_TWO = 2
|
||||
BUTTON_THREE = 3
|
||||
BUTTON_FOUR = 4
|
||||
|
||||
# Revlog types
|
||||
REVLOG_LRN = 0
|
||||
REVLOG_REV = 1
|
||||
REVLOG_RELRN = 2
|
||||
REVLOG_CRAM = 3
|
||||
|
||||
# Labels
|
||||
##########################################################################
|
||||
|
||||
|
@ -27,7 +27,7 @@ defaultDeck = {
|
||||
"conf": 1,
|
||||
"usn": 0,
|
||||
"desc": "",
|
||||
"dyn": 0, # anki uses int/bool interchangably here
|
||||
"dyn": DECK_STD, # anki uses int/bool interchangably here
|
||||
"collapsed": False,
|
||||
# added in beta11
|
||||
"extendNew": 10,
|
||||
@ -40,7 +40,7 @@ defaultDynamicDeck = {
|
||||
"lrnToday": [0, 0],
|
||||
"timeToday": [0, 0],
|
||||
"collapsed": False,
|
||||
"dyn": 1,
|
||||
"dyn": DECK_DYN,
|
||||
"desc": "",
|
||||
"usn": 0,
|
||||
"delays": None,
|
||||
@ -71,7 +71,7 @@ defaultConf = {
|
||||
"minInt": 1,
|
||||
"leechFails": 8,
|
||||
# type 0=suspend, 1=tagonly
|
||||
"leechAction": 0,
|
||||
"leechAction": LEECH_SUSPEND,
|
||||
},
|
||||
"rev": {
|
||||
"perDay": 200,
|
||||
|
@ -240,7 +240,7 @@ select distinct(n.id) from cards c, notes n where c.nid=n.id and """
|
||||
elif type == "cardDue":
|
||||
sort = "c.type, c.due"
|
||||
elif type == "cardEase":
|
||||
sort = "c.type == 0, c.factor"
|
||||
sort = f"c.type == {CARD_TYPE_NEW}, c.factor"
|
||||
elif type == "cardLapses":
|
||||
sort = "c.lapses"
|
||||
elif type == "cardIvl":
|
||||
@ -271,18 +271,18 @@ select distinct(n.id) from cards c, notes n where c.nid=n.id and """
|
||||
if val == "review":
|
||||
n = 2
|
||||
elif val == "new":
|
||||
n = 0
|
||||
n = CARD_TYPE_NEW
|
||||
else:
|
||||
return "queue in (1, 3)"
|
||||
return f"queue in ({QUEUE_TYPE_LRN}, {QUEUE_TYPE_DAY_LEARN_RELEARN})"
|
||||
return "type = %d" % n
|
||||
elif val == "suspended":
|
||||
return "c.queue = -1"
|
||||
elif val == "buried":
|
||||
return "c.queue in (-2, -3)"
|
||||
return f"c.queue in ({QUEUE_TYPE_SIBLING_BURIED}, {QUEUE_TYPE_MANUALLY_BURIED})"
|
||||
elif val == "due":
|
||||
return """
|
||||
(c.queue in (2,3) and c.due <= %d) or
|
||||
(c.queue = 1 and c.due <= %d)""" % (
|
||||
return f"""
|
||||
(c.queue in ({QUEUE_TYPE_REV},{QUEUE_TYPE_DAY_LEARN_RELEARN}) and c.due <= %d) or
|
||||
(c.queue = {QUEUE_TYPE_LRN} and c.due <= %d)""" % (
|
||||
self.col.sched.today,
|
||||
self.col.sched.dayCutoff,
|
||||
)
|
||||
@ -349,7 +349,7 @@ select distinct(n.id) from cards c, notes n where c.nid=n.id and """
|
||||
if prop == "due":
|
||||
val += self.col.sched.today
|
||||
# only valid for review/daily learning
|
||||
q.append("(c.queue in (2,3))")
|
||||
q.append(f"(c.queue in ({QUEUE_TYPE_REV},{QUEUE_TYPE_DAY_LEARN_RELEARN}))")
|
||||
elif prop == "ease":
|
||||
prop = "factor"
|
||||
val = int(val * 1000)
|
||||
|
@ -18,6 +18,7 @@ import decorator
|
||||
|
||||
import anki
|
||||
from anki.cards import Card
|
||||
from anki.notes import Note
|
||||
|
||||
# New hook/filter handling
|
||||
##############################################################################
|
||||
@ -27,6 +28,33 @@ from anki.cards import Card
|
||||
# @@AUTOGEN@@
|
||||
|
||||
|
||||
class _BgThreadProgressCallbackFilter:
|
||||
"""Warning: this is called on a background thread."""
|
||||
|
||||
_hooks: List[Callable[[bool, "anki.rsbackend.Progress"], bool]] = []
|
||||
|
||||
def append(self, cb: Callable[[bool, "anki.rsbackend.Progress"], bool]) -> None:
|
||||
"""(proceed: bool, progress: anki.rsbackend.Progress)"""
|
||||
self._hooks.append(cb)
|
||||
|
||||
def remove(self, cb: Callable[[bool, "anki.rsbackend.Progress"], bool]) -> None:
|
||||
if cb in self._hooks:
|
||||
self._hooks.remove(cb)
|
||||
|
||||
def __call__(self, proceed: bool, progress: anki.rsbackend.Progress) -> bool:
|
||||
for filter in self._hooks:
|
||||
try:
|
||||
proceed = filter(proceed, progress)
|
||||
except:
|
||||
# if the hook fails, remove it
|
||||
self._hooks.remove(filter)
|
||||
raise
|
||||
return proceed
|
||||
|
||||
|
||||
bg_thread_progress_callback = _BgThreadProgressCallbackFilter()
|
||||
|
||||
|
||||
class _CardDidLeechHook:
|
||||
_hooks: List[Callable[[Card], None]] = []
|
||||
|
||||
@ -133,6 +161,32 @@ class _CardOdueWasInvalidHook:
|
||||
card_odue_was_invalid = _CardOdueWasInvalidHook()
|
||||
|
||||
|
||||
class _CardWillFlushHook:
|
||||
"""Allow to change a card before it is added/updated in the database."""
|
||||
|
||||
_hooks: List[Callable[[Card], None]] = []
|
||||
|
||||
def append(self, cb: Callable[[Card], None]) -> None:
|
||||
"""(card: Card)"""
|
||||
self._hooks.append(cb)
|
||||
|
||||
def remove(self, cb: Callable[[Card], None]) -> None:
|
||||
if cb in self._hooks:
|
||||
self._hooks.remove(cb)
|
||||
|
||||
def __call__(self, card: Card) -> None:
|
||||
for hook in self._hooks:
|
||||
try:
|
||||
hook(card)
|
||||
except:
|
||||
# if the hook fails, remove it
|
||||
self._hooks.remove(hook)
|
||||
raise
|
||||
|
||||
|
||||
card_will_flush = _CardWillFlushHook()
|
||||
|
||||
|
||||
class _DeckAddedHook:
|
||||
_hooks: List[Callable[[Dict[str, Any]], None]] = []
|
||||
|
||||
@ -277,6 +331,32 @@ class _NoteTypeAddedHook:
|
||||
note_type_added = _NoteTypeAddedHook()
|
||||
|
||||
|
||||
class _NoteWillFlushHook:
|
||||
"""Allow to change a note before it is added/updated in the database."""
|
||||
|
||||
_hooks: List[Callable[[Note], None]] = []
|
||||
|
||||
def append(self, cb: Callable[[Note], None]) -> None:
|
||||
"""(note: Note)"""
|
||||
self._hooks.append(cb)
|
||||
|
||||
def remove(self, cb: Callable[[Note], None]) -> None:
|
||||
if cb in self._hooks:
|
||||
self._hooks.remove(cb)
|
||||
|
||||
def __call__(self, note: Note) -> None:
|
||||
for hook in self._hooks:
|
||||
try:
|
||||
hook(note)
|
||||
except:
|
||||
# if the hook fails, remove it
|
||||
self._hooks.remove(hook)
|
||||
raise
|
||||
|
||||
|
||||
note_will_flush = _NoteWillFlushHook()
|
||||
|
||||
|
||||
class _NotesWillBeDeletedHook:
|
||||
_hooks: List[Callable[["anki.storage._Collection", List[int]], None]] = []
|
||||
|
||||
|
@ -6,6 +6,7 @@ import unicodedata
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
|
||||
from anki.collection import _Collection
|
||||
from anki.consts import *
|
||||
from anki.importing.base import Importer
|
||||
from anki.lang import _
|
||||
from anki.storage import Collection
|
||||
@ -343,7 +344,10 @@ class Anki2Importer(Importer):
|
||||
card[4] = intTime()
|
||||
card[5] = usn
|
||||
# review cards have a due date relative to collection
|
||||
if card[7] in (2, 3) or card[6] == 2:
|
||||
if (
|
||||
card[7] in (QUEUE_TYPE_REV, QUEUE_TYPE_DAY_LEARN_RELEARN)
|
||||
or card[6] == CARD_TYPE_REV
|
||||
):
|
||||
card[8] -= aheadBy
|
||||
# odue needs updating too
|
||||
if card[14]:
|
||||
@ -356,13 +360,13 @@ class Anki2Importer(Importer):
|
||||
card[8] = card[14]
|
||||
card[14] = 0
|
||||
# queue
|
||||
if card[6] == 1: # type
|
||||
card[7] = 0
|
||||
if card[6] == CARD_TYPE_LRN: # type
|
||||
card[7] = QUEUE_TYPE_NEW
|
||||
else:
|
||||
card[7] = card[6]
|
||||
# type
|
||||
if card[6] == 1:
|
||||
card[6] = 0
|
||||
if card[6] == CARD_TYPE_LRN:
|
||||
card[6] = CARD_TYPE_NEW
|
||||
cards.append(card)
|
||||
# we need to import revlog, rewriting card ids and bumping usn
|
||||
for rev in self.src.db.execute("select * from revlog where cid = ?", scid):
|
||||
|
@ -52,9 +52,12 @@ class ForeignCard:
|
||||
# If the first field of the model is not in the map, the map is invalid.
|
||||
|
||||
# The import mode is one of:
|
||||
# 0: update if first field matches existing note
|
||||
# 1: ignore if first field matches existing note
|
||||
# 2: import even if first field matches existing note
|
||||
# UPDATE_MODE: update if first field matches existing note
|
||||
# IGNORE_MODE: ignore if first field matches existing note
|
||||
# ADD_MODE: import even if first field matches existing note
|
||||
UPDATE_MODE = 0
|
||||
IGNORE_MODE = 1
|
||||
ADD_MODE = 2
|
||||
|
||||
|
||||
class NoteImporter(Importer):
|
||||
@ -62,7 +65,7 @@ class NoteImporter(Importer):
|
||||
needMapper = True
|
||||
needDelimiter = False
|
||||
allowHTML = False
|
||||
importMode = 0
|
||||
importMode = UPDATE_MODE
|
||||
mapping: Optional[List[str]]
|
||||
tagModified: Optional[str]
|
||||
|
||||
@ -153,7 +156,7 @@ class NoteImporter(Importer):
|
||||
self.log.append(_("Empty first field: %s") % " ".join(n.fields))
|
||||
continue
|
||||
# earlier in import?
|
||||
if fld0 in firsts and self.importMode != 2:
|
||||
if fld0 in firsts and self.importMode != ADD_MODE:
|
||||
# duplicates in source file; log and ignore
|
||||
self.log.append(_("Appeared twice in file: %s") % fld0)
|
||||
continue
|
||||
@ -168,16 +171,16 @@ class NoteImporter(Importer):
|
||||
if fld0 == sflds[0]:
|
||||
# duplicate
|
||||
found = True
|
||||
if self.importMode == 0:
|
||||
if self.importMode == UPDATE_MODE:
|
||||
data = self.updateData(n, id, sflds)
|
||||
if data:
|
||||
updates.append(data)
|
||||
updateLog.append(updateLogTxt % fld0)
|
||||
dupeCount += 1
|
||||
found = True
|
||||
elif self.importMode == 1:
|
||||
elif self.importMode == IGNORE_MODE:
|
||||
dupeCount += 1
|
||||
elif self.importMode == 2:
|
||||
elif self.importMode == ADD_MODE:
|
||||
# allow duplicates in this case
|
||||
if fld0 not in dupes:
|
||||
# only show message once, no matter how many
|
||||
@ -214,9 +217,9 @@ class NoteImporter(Importer):
|
||||
ngettext("%d note updated", "%d notes updated", self.updateCount)
|
||||
% self.updateCount
|
||||
)
|
||||
if self.importMode == 0:
|
||||
if self.importMode == UPDATE_MODE:
|
||||
unchanged = dupeCount - self.updateCount
|
||||
elif self.importMode == 1:
|
||||
elif self.importMode == IGNORE_MODE:
|
||||
unchanged = dupeCount
|
||||
else:
|
||||
unchanged = 0
|
||||
|
@ -4,9 +4,9 @@
|
||||
# Please leave the coding line in this file to prevent xgettext complaining.
|
||||
|
||||
import gettext
|
||||
import os
|
||||
import re
|
||||
import threading
|
||||
from typing import Any
|
||||
from typing import Optional, Union
|
||||
|
||||
langs = sorted(
|
||||
[
|
||||
@ -25,7 +25,7 @@ langs = sorted(
|
||||
("Galego", "gl_ES"),
|
||||
("Hrvatski", "hr_HR"),
|
||||
("Italiano", "it_IT"),
|
||||
("lo jbobau", "jbo"),
|
||||
("lo jbobau", "jbo_EN"),
|
||||
("Lenga d'òc", "oc_FR"),
|
||||
("Magyar", "hu_HU"),
|
||||
("Nederlands", "nl_NL"),
|
||||
@ -106,61 +106,66 @@ compatMap = {
|
||||
"vi": "vi_VN",
|
||||
}
|
||||
|
||||
threadLocal = threading.local()
|
||||
|
||||
# global defaults
|
||||
currentLang: Any = None
|
||||
currentTranslation: Any = None
|
||||
def lang_to_disk_lang(lang: str) -> str:
|
||||
"""Normalize lang, then convert it to name used on disk."""
|
||||
# convert it into our canonical representation first
|
||||
lang = lang.replace("-", "_")
|
||||
if lang in compatMap:
|
||||
lang = compatMap[lang]
|
||||
|
||||
# these language/region combinations are fully qualified, but with a hyphen
|
||||
if lang in (
|
||||
"en_GB",
|
||||
"es_ES",
|
||||
"ga_IE",
|
||||
"hy_AM",
|
||||
"nb_NO",
|
||||
"nn_NO",
|
||||
"pt_BR",
|
||||
"pt_PT",
|
||||
"sv_SE",
|
||||
"zh_CN",
|
||||
"zh_TW",
|
||||
):
|
||||
return lang.replace("_", "-")
|
||||
# other languages have the region portion stripped
|
||||
return re.match("(.*)_", lang).group(1)
|
||||
|
||||
|
||||
def localTranslation() -> Any:
|
||||
"Return the translation local to this thread, or the default."
|
||||
if getattr(threadLocal, "currentTranslation", None):
|
||||
return threadLocal.currentTranslation
|
||||
else:
|
||||
return currentTranslation
|
||||
# the currently set interface language
|
||||
currentLang = "en"
|
||||
|
||||
# the current translation catalog
|
||||
current_catalog: Optional[
|
||||
Union[gettext.NullTranslations, gettext.GNUTranslations]
|
||||
] = None
|
||||
|
||||
# path to locale folder
|
||||
locale_folder = ""
|
||||
|
||||
|
||||
def _(str: str) -> str:
|
||||
return localTranslation().gettext(str)
|
||||
if current_catalog:
|
||||
return current_catalog.gettext(str)
|
||||
else:
|
||||
return str
|
||||
|
||||
|
||||
def ngettext(single: str, plural: str, n: int) -> str:
|
||||
return localTranslation().ngettext(single, plural, n)
|
||||
if current_catalog:
|
||||
return current_catalog.ngettext(single, plural, n)
|
||||
elif n == 1:
|
||||
return single
|
||||
return plural
|
||||
|
||||
|
||||
def setLang(lang: str, locale_dir: str, local: bool = True) -> None:
|
||||
lang = mungeCode(lang)
|
||||
trans = gettext.translation("anki", locale_dir, languages=[lang], fallback=True)
|
||||
if local:
|
||||
threadLocal.currentLang = lang
|
||||
threadLocal.currentTranslation = trans
|
||||
else:
|
||||
global currentLang, currentTranslation
|
||||
currentLang = lang
|
||||
currentTranslation = trans
|
||||
def set_lang(lang: str, locale_dir: str) -> None:
|
||||
global currentLang, current_catalog, locale_folder
|
||||
gettext_dir = os.path.join(locale_dir, "gettext")
|
||||
|
||||
|
||||
def getLang() -> str:
|
||||
"Return the language local to this thread, or the default."
|
||||
if getattr(threadLocal, "currentLang", None):
|
||||
return threadLocal.currentLang
|
||||
else:
|
||||
return currentLang
|
||||
|
||||
|
||||
def noHint(str) -> str:
|
||||
"Remove translation hint from end of string."
|
||||
return re.sub(r"(^.*?)( ?\(.+?\))?$", "\\1", str)
|
||||
|
||||
|
||||
def mungeCode(code: str) -> Any:
|
||||
code = code.replace("-", "_")
|
||||
if code in compatMap:
|
||||
code = compatMap[code]
|
||||
|
||||
return code
|
||||
|
||||
|
||||
if not currentTranslation:
|
||||
setLang("en_US", locale_dir="", local=False)
|
||||
currentLang = lang
|
||||
current_catalog = gettext.translation(
|
||||
"anki", gettext_dir, languages=[lang], fallback=True
|
||||
)
|
||||
locale_folder = locale_dir
|
||||
|
@ -6,15 +6,15 @@ from __future__ import annotations
|
||||
import html
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
from typing import Any, Optional
|
||||
from typing import Any, List, Optional, Tuple
|
||||
|
||||
import anki
|
||||
from anki import hooks
|
||||
from anki.lang import _
|
||||
from anki.models import NoteType
|
||||
from anki.rsbackend import ExtractedLatex
|
||||
from anki.template import TemplateRenderContext, TemplateRenderOutput
|
||||
from anki.utils import call, checksum, isMac, namedtmp, stripHTML, tmpdir
|
||||
from anki.utils import call, isMac, namedtmp, tmpdir
|
||||
|
||||
pngCommands = [
|
||||
["latex", "-interaction=nonstopmode", "tmp.tex"],
|
||||
@ -27,27 +27,12 @@ svgCommands = [
|
||||
]
|
||||
|
||||
build = True # if off, use existing media but don't create new
|
||||
regexps = {
|
||||
"standard": re.compile(r"\[latex\](.+?)\[/latex\]", re.DOTALL | re.IGNORECASE),
|
||||
"expression": re.compile(r"\[\$\](.+?)\[/\$\]", re.DOTALL | re.IGNORECASE),
|
||||
"math": re.compile(r"\[\$\$\](.+?)\[/\$\$\]", re.DOTALL | re.IGNORECASE),
|
||||
}
|
||||
|
||||
# add standard tex install location to osx
|
||||
if isMac:
|
||||
os.environ["PATH"] += ":/usr/texbin:/Library/TeX/texbin"
|
||||
|
||||
|
||||
def stripLatex(text) -> Any:
|
||||
for match in regexps["standard"].finditer(text):
|
||||
text = text.replace(match.group(), "")
|
||||
for match in regexps["expression"].finditer(text):
|
||||
text = text.replace(match.group(), "")
|
||||
for match in regexps["math"].finditer(text):
|
||||
text = text.replace(match.group(), "")
|
||||
return text
|
||||
|
||||
|
||||
def on_card_did_render(output: TemplateRenderOutput, ctx: TemplateRenderContext):
|
||||
output.question_text = render_latex(
|
||||
output.question_text, ctx.note_type(), ctx.col()
|
||||
@ -56,61 +41,48 @@ def on_card_did_render(output: TemplateRenderOutput, ctx: TemplateRenderContext)
|
||||
|
||||
|
||||
def render_latex(html: str, model: NoteType, col: anki.storage._Collection,) -> str:
|
||||
"Convert TEXT with embedded latex tags to image links."
|
||||
for match in regexps["standard"].finditer(html):
|
||||
html = html.replace(match.group(), _imgLink(col, match.group(1), model))
|
||||
for match in regexps["expression"].finditer(html):
|
||||
html = html.replace(
|
||||
match.group(), _imgLink(col, "$" + match.group(1) + "$", model)
|
||||
)
|
||||
for match in regexps["math"].finditer(html):
|
||||
html = html.replace(
|
||||
match.group(),
|
||||
_imgLink(
|
||||
col,
|
||||
"\\begin{displaymath}" + match.group(1) + "\\end{displaymath}",
|
||||
model,
|
||||
),
|
||||
)
|
||||
"Convert embedded latex tags in text to image links."
|
||||
html, err = render_latex_returning_errors(html, model, col)
|
||||
if err:
|
||||
html += "\n".join(err)
|
||||
return html
|
||||
|
||||
|
||||
def _imgLink(col, latex: str, model: NoteType) -> str:
|
||||
"Return an img link for LATEX, creating if necesssary."
|
||||
txt = _latexFromHtml(col, latex)
|
||||
def render_latex_returning_errors(
|
||||
html: str, model: NoteType, col: anki.storage._Collection
|
||||
) -> Tuple[str, List[str]]:
|
||||
"""Returns (text, errors).
|
||||
|
||||
if model.get("latexsvg", False):
|
||||
ext = "svg"
|
||||
else:
|
||||
ext = "png"
|
||||
errors will be non-empty if LaTeX failed to render."""
|
||||
svg = model.get("latexsvg", False)
|
||||
header = model["latexPre"]
|
||||
footer = model["latexPost"]
|
||||
|
||||
# is there an existing file?
|
||||
fname = "latex-%s.%s" % (checksum(txt.encode("utf8")), ext)
|
||||
link = '<img class=latex src="%s">' % fname
|
||||
if os.path.exists(fname):
|
||||
return link
|
||||
out = col.backend.extract_latex(html, svg)
|
||||
errors = []
|
||||
html = out.html
|
||||
|
||||
# building disabled?
|
||||
if not build:
|
||||
return "[latex]%s[/latex]" % latex
|
||||
for latex in out.latex:
|
||||
# don't need to render?
|
||||
if not build or col.media.have(latex.filename):
|
||||
continue
|
||||
|
||||
err = _buildImg(col, txt, fname, model)
|
||||
if err:
|
||||
return err
|
||||
else:
|
||||
return link
|
||||
err = _save_latex_image(col, latex, header, footer, svg)
|
||||
if err is not None:
|
||||
errors.append(err)
|
||||
|
||||
return html, errors
|
||||
|
||||
|
||||
def _latexFromHtml(col, latex: str) -> str:
|
||||
"Convert entities and fix newlines."
|
||||
latex = re.sub("<br( /)?>|<div>", "\n", latex)
|
||||
latex = stripHTML(latex)
|
||||
return latex
|
||||
|
||||
|
||||
def _buildImg(col, latex: str, fname: str, model: NoteType) -> Optional[str]:
|
||||
def _save_latex_image(
|
||||
col: anki.storage._Collection,
|
||||
extracted: ExtractedLatex,
|
||||
header: str,
|
||||
footer: str,
|
||||
svg: bool,
|
||||
) -> Optional[str]:
|
||||
# add header/footer
|
||||
latex = model["latexPre"] + "\n" + latex + "\n" + model["latexPost"]
|
||||
latex = header + "\n" + extracted.latex_body + "\n" + footer
|
||||
# it's only really secure if run in a jail, but these are the most common
|
||||
tmplatex = latex.replace("\\includegraphics", "")
|
||||
for bad in (
|
||||
@ -138,8 +110,8 @@ package in the LaTeX header instead."""
|
||||
% bad
|
||||
)
|
||||
|
||||
# commands to use?
|
||||
if model.get("latexsvg", False):
|
||||
# commands to use
|
||||
if svg:
|
||||
latexCmds = svgCommands
|
||||
ext = "svg"
|
||||
else:
|
||||
@ -152,17 +124,18 @@ package in the LaTeX header instead."""
|
||||
texfile = open(texpath, "w", encoding="utf8")
|
||||
texfile.write(latex)
|
||||
texfile.close()
|
||||
mdir = col.media.dir()
|
||||
oldcwd = os.getcwd()
|
||||
png = namedtmp("tmp.%s" % ext)
|
||||
png_or_svg = namedtmp("tmp.%s" % ext)
|
||||
try:
|
||||
# generate png
|
||||
# generate png/svg
|
||||
os.chdir(tmpdir())
|
||||
for latexCmd in latexCmds:
|
||||
if call(latexCmd, stdout=log, stderr=log):
|
||||
return _errMsg(latexCmd[0], texpath)
|
||||
# add to media
|
||||
shutil.copyfile(png, os.path.join(mdir, fname))
|
||||
data = open(png_or_svg, "rb").read()
|
||||
col.media.write_data(extracted.filename, data)
|
||||
os.unlink(png_or_svg)
|
||||
return None
|
||||
finally:
|
||||
os.chdir(oldcwd)
|
||||
|
@ -3,26 +3,31 @@
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import io
|
||||
import json
|
||||
import os
|
||||
import pathlib
|
||||
import re
|
||||
import sys
|
||||
import traceback
|
||||
import unicodedata
|
||||
import time
|
||||
import urllib.error
|
||||
import urllib.parse
|
||||
import urllib.request
|
||||
import zipfile
|
||||
from typing import Any, Callable, List, Optional, Tuple, Union
|
||||
|
||||
import anki
|
||||
from anki.consts import *
|
||||
from anki.db import DB, DBError
|
||||
from anki.lang import _
|
||||
from anki.latex import render_latex
|
||||
from anki.utils import checksum, isMac, isWin
|
||||
from anki.latex import render_latex, render_latex_returning_errors
|
||||
from anki.rsbackend import MediaCheckOutput
|
||||
from anki.utils import intTime
|
||||
|
||||
|
||||
def media_paths_from_col_path(col_path: str) -> Tuple[str, str]:
|
||||
media_folder = re.sub(r"(?i)\.(anki2)$", ".media", col_path)
|
||||
media_db = media_folder + ".db2"
|
||||
return (media_folder, media_db)
|
||||
|
||||
|
||||
# fixme: look into whether we can drop chdir() below
|
||||
# - need to check aa89d06304fecd3597da4565330a3e55bdbb91fe
|
||||
# - and audio handling code
|
||||
|
||||
|
||||
class MediaManager:
|
||||
@ -35,7 +40,6 @@ class MediaManager:
|
||||
r"(?i)(<img[^>]* src=(?!['\"])(?P<fname>[^ >]+)[^>]*?>)",
|
||||
]
|
||||
regexps = soundRegexps + imgRegexps
|
||||
db: Optional[DB]
|
||||
|
||||
def __init__(self, col: anki.storage._Collection, server: bool) -> None:
|
||||
self.col = col
|
||||
@ -43,7 +47,7 @@ class MediaManager:
|
||||
self._dir = None
|
||||
return
|
||||
# media directory
|
||||
self._dir = re.sub(r"(?i)\.(anki2)$", ".media", self.col.path)
|
||||
self._dir = media_paths_from_col_path(self.col.path)[0]
|
||||
if not os.path.exists(self._dir):
|
||||
os.makedirs(self._dir)
|
||||
try:
|
||||
@ -55,72 +59,15 @@ class MediaManager:
|
||||
os.chdir(self._dir)
|
||||
except OSError:
|
||||
raise Exception("invalidTempFolder")
|
||||
# change database
|
||||
self.connect()
|
||||
|
||||
def connect(self) -> None:
|
||||
if self.col.server:
|
||||
return
|
||||
path = self.dir() + ".db2"
|
||||
create = not os.path.exists(path)
|
||||
os.chdir(self._dir)
|
||||
self.db = DB(path)
|
||||
if create:
|
||||
self._initDB()
|
||||
self.maybeUpgrade()
|
||||
|
||||
def _initDB(self) -> None:
|
||||
self.db.executescript(
|
||||
"""
|
||||
create table media (
|
||||
fname text not null primary key,
|
||||
csum text, -- null indicates deleted file
|
||||
mtime int not null, -- zero if deleted
|
||||
dirty int not null
|
||||
);
|
||||
|
||||
create index idx_media_dirty on media (dirty);
|
||||
|
||||
create table meta (dirMod int, lastUsn int); insert into meta values (0, 0);
|
||||
"""
|
||||
)
|
||||
|
||||
def maybeUpgrade(self) -> None:
|
||||
oldpath = self.dir() + ".db"
|
||||
if os.path.exists(oldpath):
|
||||
self.db.execute('attach "../collection.media.db" as old')
|
||||
try:
|
||||
self.db.execute(
|
||||
"""
|
||||
insert into media
|
||||
select m.fname, csum, mod, ifnull((select 1 from log l2 where l2.fname=m.fname), 0) as dirty
|
||||
from old.media m
|
||||
left outer join old.log l using (fname)
|
||||
union
|
||||
select fname, null, 0, 1 from old.log where type=1;"""
|
||||
)
|
||||
self.db.execute("delete from meta")
|
||||
self.db.execute(
|
||||
"""
|
||||
insert into meta select dirMod, usn from old.meta
|
||||
"""
|
||||
)
|
||||
self.db.commit()
|
||||
except Exception as e:
|
||||
# if we couldn't import the old db for some reason, just start
|
||||
# anew
|
||||
self.col.log("failed to import old media db:" + traceback.format_exc())
|
||||
self.db.execute("detach old")
|
||||
npath = "../collection.media.db.old"
|
||||
if os.path.exists(npath):
|
||||
os.unlink(npath)
|
||||
os.rename("../collection.media.db", npath)
|
||||
|
||||
def close(self) -> None:
|
||||
if self.col.server:
|
||||
return
|
||||
self.db.close()
|
||||
self.db = None
|
||||
# change cwd back to old location
|
||||
if self._oldcwd:
|
||||
try:
|
||||
@ -129,84 +76,47 @@ create table meta (dirMod int, lastUsn int); insert into meta values (0, 0);
|
||||
# may have been deleted
|
||||
pass
|
||||
|
||||
def _deleteDB(self) -> None:
|
||||
path = self.db._path
|
||||
self.close()
|
||||
os.unlink(path)
|
||||
self.connect()
|
||||
|
||||
def dir(self) -> Any:
|
||||
return self._dir
|
||||
|
||||
def _isFAT32(self) -> bool:
|
||||
if not isWin:
|
||||
return False
|
||||
# pylint: disable=import-error
|
||||
import win32api, win32file # pytype: disable=import-error
|
||||
def force_resync(self) -> None:
|
||||
os.unlink(media_paths_from_col_path(self.col.path)[1])
|
||||
|
||||
try:
|
||||
name = win32file.GetVolumeNameForVolumeMountPoint(self._dir[:3])
|
||||
except:
|
||||
# mapped & unmapped network drive; pray that it's not vfat
|
||||
return False
|
||||
if win32api.GetVolumeInformation(name)[4].lower().startswith("fat"):
|
||||
return True
|
||||
return False
|
||||
|
||||
# Adding media
|
||||
# File manipulation
|
||||
##########################################################################
|
||||
# opath must be in unicode
|
||||
|
||||
def addFile(self, opath: str) -> Any:
|
||||
with open(opath, "rb") as f:
|
||||
return self.writeData(opath, f.read())
|
||||
def add_file(self, path: str) -> str:
|
||||
"""Add basename of path to the media folder, renaming if not unique.
|
||||
|
||||
def writeData(self, opath: str, data: bytes, typeHint: Optional[str] = None) -> Any:
|
||||
# if fname is a full path, use only the basename
|
||||
fname = os.path.basename(opath)
|
||||
Returns possibly-renamed filename."""
|
||||
with open(path, "rb") as f:
|
||||
return self.write_data(os.path.basename(path), f.read())
|
||||
|
||||
# if it's missing an extension and a type hint was provided, use that
|
||||
if not os.path.splitext(fname)[1] and typeHint:
|
||||
def write_data(self, desired_fname: str, data: bytes) -> str:
|
||||
"""Write the file to the media folder, renaming if not unique.
|
||||
|
||||
Returns possibly-renamed filename."""
|
||||
return self.col.backend.add_file_to_media_folder(desired_fname, data)
|
||||
|
||||
def add_extension_based_on_mime(self, fname: str, content_type: str) -> str:
|
||||
"If jpg or png mime, add .png/.jpg if missing extension."
|
||||
if not os.path.splitext(fname)[1]:
|
||||
# mimetypes is returning '.jpe' even after calling .init(), so we'll do
|
||||
# it manually instead
|
||||
typeMap = {
|
||||
type_map = {
|
||||
"image/jpeg": ".jpg",
|
||||
"image/png": ".png",
|
||||
}
|
||||
if typeHint in typeMap:
|
||||
fname += typeMap[typeHint]
|
||||
if content_type in type_map:
|
||||
fname += type_map[content_type]
|
||||
return fname
|
||||
|
||||
# make sure we write it in NFC form (pre-APFS Macs will autoconvert to NFD),
|
||||
# and return an NFC-encoded reference
|
||||
fname = unicodedata.normalize("NFC", fname)
|
||||
# ensure it's a valid filename
|
||||
base = self.cleanFilename(fname)
|
||||
(root, ext) = os.path.splitext(base)
|
||||
def have(self, fname: str) -> bool:
|
||||
return os.path.exists(os.path.join(self.dir(), fname))
|
||||
|
||||
def repl(match):
|
||||
n = int(match.group(1))
|
||||
return " (%d)" % (n + 1)
|
||||
|
||||
# find the first available name
|
||||
csum = checksum(data)
|
||||
while True:
|
||||
fname = root + ext
|
||||
path = os.path.join(self.dir(), fname)
|
||||
# if it doesn't exist, copy it directly
|
||||
if not os.path.exists(path):
|
||||
with open(path, "wb") as f:
|
||||
f.write(data)
|
||||
return fname
|
||||
# if it's identical, reuse
|
||||
with open(path, "rb") as f:
|
||||
if checksum(f.read()) == csum:
|
||||
return fname
|
||||
# otherwise, increment the index in the filename
|
||||
reg = r" \((\d+)\)$"
|
||||
if not re.search(reg, root):
|
||||
root = root + " (1)"
|
||||
else:
|
||||
root = re.sub(reg, repl, root)
|
||||
def trash_files(self, fnames: List[str]) -> None:
|
||||
"Move provided files to the trash."
|
||||
self.col.backend.trash_media_files(fnames)
|
||||
|
||||
# String manipulation
|
||||
##########################################################################
|
||||
@ -216,12 +126,6 @@ create table meta (dirMod int, lastUsn int); insert into meta values (0, 0);
|
||||
) -> List[str]:
|
||||
l = []
|
||||
model = self.col.models.get(mid)
|
||||
if model["type"] == MODEL_CLOZE and "{{c" in string:
|
||||
# if the field has clozes in it, we'll need to expand the
|
||||
# possibilities so we can render latex
|
||||
strings = self.col.backend.expand_clozes_to_reveal_latex(string)
|
||||
else:
|
||||
strings = string
|
||||
# handle latex
|
||||
string = render_latex(string, model, self.col)
|
||||
# extract filenames
|
||||
@ -239,11 +143,13 @@ create table meta (dirMod int, lastUsn int); insert into meta values (0, 0);
|
||||
return txt
|
||||
|
||||
def strip(self, txt: str) -> str:
|
||||
"Return text with sound and image tags removed."
|
||||
for reg in self.regexps:
|
||||
txt = re.sub(reg, "", txt)
|
||||
return txt
|
||||
|
||||
def escapeImages(self, string: str, unescape: bool = False) -> str:
|
||||
"Apply or remove percent encoding to image filenames."
|
||||
fn: Callable
|
||||
if unescape:
|
||||
fn = urllib.parse.unquote
|
||||
@ -261,110 +167,55 @@ create table meta (dirMod int, lastUsn int); insert into meta values (0, 0);
|
||||
string = re.sub(reg, repl, string)
|
||||
return string
|
||||
|
||||
# Rebuilding DB
|
||||
# Checking media
|
||||
##########################################################################
|
||||
|
||||
def check(
|
||||
self, local: Optional[List[str]] = None
|
||||
) -> Tuple[List[str], List[str], List[str]]:
|
||||
"Return (missingFiles, unusedFiles)."
|
||||
mdir = self.dir()
|
||||
# gather all media references in NFC form
|
||||
allRefs = set()
|
||||
for nid, mid, flds in self.col.db.execute("select id, mid, flds from notes"):
|
||||
noteRefs = self.filesInStr(mid, flds)
|
||||
# check the refs are in NFC
|
||||
for f in noteRefs:
|
||||
# if they're not, we'll need to fix them first
|
||||
if f != unicodedata.normalize("NFC", f):
|
||||
self._normalizeNoteRefs(nid)
|
||||
noteRefs = self.filesInStr(mid, flds)
|
||||
break
|
||||
allRefs.update(noteRefs)
|
||||
# loop through media folder
|
||||
unused = []
|
||||
if local is None:
|
||||
files = os.listdir(mdir)
|
||||
else:
|
||||
files = local
|
||||
renamedFiles = False
|
||||
dirFound = False
|
||||
warnings = []
|
||||
for file in files:
|
||||
if not local:
|
||||
if not os.path.isfile(file):
|
||||
# ignore directories
|
||||
dirFound = True
|
||||
continue
|
||||
if file.startswith("_"):
|
||||
# leading _ says to ignore file
|
||||
continue
|
||||
def check(self) -> MediaCheckOutput:
|
||||
"This should be called while the collection is closed."
|
||||
return self.col.backend.check_media()
|
||||
|
||||
if self.hasIllegal(file):
|
||||
name = file.encode(sys.getfilesystemencoding(), errors="replace")
|
||||
name = str(name, sys.getfilesystemencoding())
|
||||
warnings.append(_("Invalid file name, please rename: %s") % name)
|
||||
continue
|
||||
def render_all_latex(
|
||||
self, progress_cb: Optional[Callable[[int], bool]] = None
|
||||
) -> Optional[Tuple[int, str]]:
|
||||
"""Render any LaTeX that is missing.
|
||||
|
||||
nfcFile = unicodedata.normalize("NFC", file)
|
||||
# we enforce NFC fs encoding on non-macs
|
||||
if not isMac and not local:
|
||||
if file != nfcFile:
|
||||
# delete if we already have the NFC form, otherwise rename
|
||||
if os.path.exists(nfcFile):
|
||||
os.unlink(file)
|
||||
renamedFiles = True
|
||||
else:
|
||||
os.rename(file, nfcFile)
|
||||
renamedFiles = True
|
||||
file = nfcFile
|
||||
# compare
|
||||
if nfcFile not in allRefs:
|
||||
unused.append(file)
|
||||
else:
|
||||
allRefs.discard(nfcFile)
|
||||
# if we renamed any files to nfc format, we must rerun the check
|
||||
# to make sure the renamed files are not marked as unused
|
||||
if renamedFiles:
|
||||
return self.check(local=local)
|
||||
nohave = [x for x in allRefs if not x.startswith("_")]
|
||||
# make sure the media DB is valid
|
||||
try:
|
||||
self.findChanges()
|
||||
except DBError:
|
||||
self._deleteDB()
|
||||
If a progress callback is provided and it returns false, the operation
|
||||
will be aborted.
|
||||
|
||||
if dirFound:
|
||||
warnings.append(
|
||||
_(
|
||||
"Anki does not support files in subfolders of the collection.media folder."
|
||||
)
|
||||
)
|
||||
return (nohave, unused, warnings)
|
||||
If an error is encountered, returns (note_id, error_message)
|
||||
"""
|
||||
last_progress = time.time()
|
||||
checked = 0
|
||||
for (nid, mid, flds) in self.col.db.execute(
|
||||
"select id, mid, flds from notes where flds like '%[%'"
|
||||
):
|
||||
|
||||
def _normalizeNoteRefs(self, nid) -> None:
|
||||
note = self.col.getNote(nid)
|
||||
for c, fld in enumerate(note.fields):
|
||||
nfc = unicodedata.normalize("NFC", fld)
|
||||
if nfc != fld:
|
||||
note.fields[c] = nfc
|
||||
note.flush()
|
||||
model = self.col.models.get(mid)
|
||||
_html, errors = render_latex_returning_errors(flds, model, self.col)
|
||||
if errors:
|
||||
return (nid, "\n".join(errors))
|
||||
|
||||
# Copying on import
|
||||
##########################################################################
|
||||
checked += 1
|
||||
elap = time.time() - last_progress
|
||||
if elap >= 0.3 and progress_cb is not None:
|
||||
last_progress = intTime()
|
||||
if not progress_cb(checked):
|
||||
return None
|
||||
|
||||
def have(self, fname: str) -> bool:
|
||||
return os.path.exists(os.path.join(self.dir(), fname))
|
||||
return None
|
||||
|
||||
# Illegal characters and paths
|
||||
# Legacy
|
||||
##########################################################################
|
||||
|
||||
_illegalCharReg = re.compile(r'[][><:"/?*^\\|\0\r\n]')
|
||||
|
||||
def stripIllegal(self, str: str) -> str:
|
||||
# currently used by ankiconnect
|
||||
print("stripIllegal() will go away")
|
||||
return re.sub(self._illegalCharReg, "", str)
|
||||
|
||||
def hasIllegal(self, s: str) -> bool:
|
||||
print("hasIllegal() will go away")
|
||||
if re.search(self._illegalCharReg, s):
|
||||
return True
|
||||
try:
|
||||
@ -373,254 +224,13 @@ create table meta (dirMod int, lastUsn int); insert into meta values (0, 0);
|
||||
return True
|
||||
return False
|
||||
|
||||
def cleanFilename(self, fname: str) -> str:
|
||||
fname = self.stripIllegal(fname)
|
||||
fname = self._cleanWin32Filename(fname)
|
||||
fname = self._cleanLongFilename(fname)
|
||||
if not fname:
|
||||
fname = "renamed"
|
||||
|
||||
return fname
|
||||
|
||||
def _cleanWin32Filename(self, fname: str) -> str:
|
||||
if not isWin:
|
||||
return fname
|
||||
|
||||
# deal with things like con/prn/etc
|
||||
p = pathlib.WindowsPath(fname)
|
||||
if p.is_reserved():
|
||||
fname = "renamed" + fname
|
||||
assert not pathlib.WindowsPath(fname).is_reserved()
|
||||
|
||||
return fname
|
||||
|
||||
def _cleanLongFilename(self, fname: str) -> Any:
|
||||
# a fairly safe limit that should work on typical windows
|
||||
# paths and on eCryptfs partitions, even with a duplicate
|
||||
# suffix appended
|
||||
namemax = 136
|
||||
|
||||
if isWin:
|
||||
pathmax = 240
|
||||
else:
|
||||
pathmax = 1024
|
||||
|
||||
# cap namemax based on absolute path
|
||||
dirlen = len(os.path.dirname(os.path.abspath(fname)))
|
||||
remaining = pathmax - dirlen
|
||||
namemax = min(remaining, namemax)
|
||||
assert namemax > 0
|
||||
|
||||
if len(fname) > namemax:
|
||||
head, ext = os.path.splitext(fname)
|
||||
headmax = namemax - len(ext)
|
||||
head = head[0:headmax]
|
||||
fname = head + ext
|
||||
assert len(fname) <= namemax
|
||||
|
||||
return fname
|
||||
|
||||
# Tracking changes
|
||||
##########################################################################
|
||||
|
||||
def findChanges(self) -> None:
|
||||
"Scan the media folder if it's changed, and note any changes."
|
||||
if self._changed():
|
||||
self._logChanges()
|
||||
pass
|
||||
|
||||
def haveDirty(self) -> Any:
|
||||
return self.db.scalar("select 1 from media where dirty=1 limit 1")
|
||||
addFile = add_file
|
||||
|
||||
def _mtime(self, path: str) -> int:
|
||||
return int(os.stat(path).st_mtime)
|
||||
|
||||
def _checksum(self, path: str) -> str:
|
||||
with open(path, "rb") as f:
|
||||
return checksum(f.read())
|
||||
|
||||
def _changed(self) -> int:
|
||||
"Return dir mtime if it has changed since the last findChanges()"
|
||||
# doesn't track edits, but user can add or remove a file to update
|
||||
mod = self.db.scalar("select dirMod from meta")
|
||||
mtime = self._mtime(self.dir())
|
||||
if not self._isFAT32() and mod and mod == mtime:
|
||||
return False
|
||||
return mtime
|
||||
|
||||
def _logChanges(self) -> None:
|
||||
(added, removed) = self._changes()
|
||||
media = []
|
||||
for f, mtime in added:
|
||||
media.append((f, self._checksum(f), mtime, 1))
|
||||
for f in removed:
|
||||
media.append((f, None, 0, 1))
|
||||
# update media db
|
||||
self.db.executemany("insert or replace into media values (?,?,?,?)", media)
|
||||
self.db.execute("update meta set dirMod = ?", self._mtime(self.dir()))
|
||||
self.db.commit()
|
||||
|
||||
def _changes(self) -> Tuple[List[Tuple[str, int]], List[str]]:
|
||||
self.cache: Dict[str, Any] = {}
|
||||
for (name, csum, mod) in self.db.execute(
|
||||
"select fname, csum, mtime from media where csum is not null"
|
||||
):
|
||||
# previous entries may not have been in NFC form
|
||||
normname = unicodedata.normalize("NFC", name)
|
||||
self.cache[normname] = [csum, mod, False]
|
||||
added = []
|
||||
removed = []
|
||||
# loop through on-disk files
|
||||
with os.scandir(self.dir()) as it:
|
||||
for f in it:
|
||||
# ignore folders and thumbs.db
|
||||
if f.is_dir():
|
||||
continue
|
||||
if f.name.lower() == "thumbs.db":
|
||||
continue
|
||||
# and files with invalid chars
|
||||
if self.hasIllegal(f.name):
|
||||
continue
|
||||
# empty files are invalid; clean them up and continue
|
||||
sz = f.stat().st_size
|
||||
if not sz:
|
||||
os.unlink(f.name)
|
||||
continue
|
||||
if sz > 100 * 1024 * 1024:
|
||||
self.col.log("ignoring file over 100MB", f.name)
|
||||
continue
|
||||
# check encoding
|
||||
normname = unicodedata.normalize("NFC", f.name)
|
||||
if not isMac:
|
||||
if f.name != normname:
|
||||
# wrong filename encoding which will cause sync errors
|
||||
if os.path.exists(normname):
|
||||
os.unlink(f.name)
|
||||
else:
|
||||
os.rename(f.name, normname)
|
||||
else:
|
||||
# on Macs we can access the file using any normalization
|
||||
pass
|
||||
|
||||
# newly added?
|
||||
mtime = int(f.stat().st_mtime)
|
||||
if normname not in self.cache:
|
||||
added.append((normname, mtime))
|
||||
else:
|
||||
# modified since last time?
|
||||
if mtime != self.cache[normname][1]:
|
||||
# and has different checksum?
|
||||
if self._checksum(normname) != self.cache[normname][0]:
|
||||
added.append((normname, mtime))
|
||||
# mark as used
|
||||
self.cache[normname][2] = True
|
||||
# look for any entries in the cache that no longer exist on disk
|
||||
for (k, v) in list(self.cache.items()):
|
||||
if not v[2]:
|
||||
removed.append(k)
|
||||
return added, removed
|
||||
|
||||
# Syncing-related
|
||||
##########################################################################
|
||||
|
||||
def lastUsn(self) -> Any:
|
||||
return self.db.scalar("select lastUsn from meta")
|
||||
|
||||
def setLastUsn(self, usn) -> None:
|
||||
self.db.execute("update meta set lastUsn = ?", usn)
|
||||
self.db.commit()
|
||||
|
||||
def syncInfo(self, fname) -> Any:
|
||||
ret = self.db.first("select csum, dirty from media where fname=?", fname)
|
||||
return ret or (None, 0)
|
||||
|
||||
def markClean(self, fnames) -> None:
|
||||
for fname in fnames:
|
||||
self.db.execute("update media set dirty=0 where fname=?", fname)
|
||||
|
||||
def syncDelete(self, fname) -> None:
|
||||
if os.path.exists(fname):
|
||||
os.unlink(fname)
|
||||
self.db.execute("delete from media where fname=?", fname)
|
||||
|
||||
def mediaCount(self) -> Any:
|
||||
return self.db.scalar("select count() from media where csum is not null")
|
||||
|
||||
def dirtyCount(self) -> Any:
|
||||
return self.db.scalar("select count() from media where dirty=1")
|
||||
|
||||
def forceResync(self) -> None:
|
||||
self.db.execute("delete from media")
|
||||
self.db.execute("update meta set lastUsn=0,dirMod=0")
|
||||
self.db.commit()
|
||||
self.db.setAutocommit(True)
|
||||
self.db.execute("vacuum")
|
||||
self.db.execute("analyze")
|
||||
self.db.setAutocommit(False)
|
||||
|
||||
# Media syncing: zips
|
||||
##########################################################################
|
||||
|
||||
def mediaChangesZip(self) -> Tuple[bytes, list]:
|
||||
f = io.BytesIO()
|
||||
z = zipfile.ZipFile(f, "w", compression=zipfile.ZIP_DEFLATED)
|
||||
|
||||
fnames = []
|
||||
# meta is list of (fname, zipname), where zipname of None
|
||||
# is a deleted file
|
||||
meta = []
|
||||
sz = 0
|
||||
|
||||
for c, (fname, csum) in enumerate(
|
||||
self.db.execute(
|
||||
"select fname, csum from media where dirty=1"
|
||||
" limit %d" % SYNC_ZIP_COUNT
|
||||
)
|
||||
):
|
||||
|
||||
fnames.append(fname)
|
||||
normname = unicodedata.normalize("NFC", fname)
|
||||
|
||||
if csum:
|
||||
self.col.log("+media zip", fname)
|
||||
z.write(fname, str(c))
|
||||
meta.append((normname, str(c)))
|
||||
sz += os.path.getsize(fname)
|
||||
else:
|
||||
self.col.log("-media zip", fname)
|
||||
meta.append((normname, ""))
|
||||
|
||||
if sz >= SYNC_ZIP_SIZE:
|
||||
break
|
||||
|
||||
z.writestr("_meta", json.dumps(meta))
|
||||
z.close()
|
||||
return f.getvalue(), fnames
|
||||
|
||||
def addFilesFromZip(self, zipData) -> int:
|
||||
"Extract zip data; true if finished."
|
||||
f = io.BytesIO(zipData)
|
||||
z = zipfile.ZipFile(f, "r")
|
||||
media = []
|
||||
# get meta info first
|
||||
meta = json.loads(z.read("_meta").decode("utf8"))
|
||||
# then loop through all files
|
||||
cnt = 0
|
||||
for i in z.infolist():
|
||||
if i.filename == "_meta":
|
||||
# ignore previously-retrieved meta
|
||||
continue
|
||||
else:
|
||||
data = z.read(i)
|
||||
csum = checksum(data)
|
||||
name = meta[i.filename]
|
||||
# normalize name
|
||||
name = unicodedata.normalize("NFC", name)
|
||||
# save file
|
||||
with open(name, "wb") as f: # type: ignore
|
||||
f.write(data)
|
||||
# update db
|
||||
media.append((name, csum, self._mtime(name), 0))
|
||||
cnt += 1
|
||||
if media:
|
||||
self.db.executemany("insert or replace into media values (?,?,?,?)", media)
|
||||
return cnt
|
||||
def writeData(self, opath: str, data: bytes, typeHint: Optional[str] = None) -> str:
|
||||
fname = os.path.basename(opath)
|
||||
if typeHint:
|
||||
fname = self.add_extension_based_on_mime(fname, typeHint)
|
||||
return self.write_data(fname, data)
|
||||
|
@ -6,6 +6,7 @@ from __future__ import annotations
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
|
||||
import anki # pylint: disable=unused-import
|
||||
from anki import hooks
|
||||
from anki.models import Field, NoteType
|
||||
from anki.utils import (
|
||||
fieldChecksum,
|
||||
@ -202,6 +203,7 @@ insert or replace into notes values (?,?,?,?,?,?,?,?,?,?,?)""",
|
||||
##################################################
|
||||
|
||||
def _preFlush(self) -> None:
|
||||
hooks.note_will_flush(self)
|
||||
# have we been added yet?
|
||||
self.newlyAdded = not self.col.db.scalar(
|
||||
"select 1 from cards where nid = ?", self.id
|
||||
|
@ -2,31 +2,88 @@
|
||||
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
||||
# pylint: skip-file
|
||||
|
||||
import enum
|
||||
import os
|
||||
from dataclasses import dataclass
|
||||
from typing import Dict, List, Tuple, Union
|
||||
from typing import Callable, Dict, List, NewType, NoReturn, Optional, Tuple, Union
|
||||
|
||||
import ankirspy # pytype: disable=import-error
|
||||
|
||||
import anki.backend_pb2 as pb
|
||||
import anki.buildinfo
|
||||
from anki import hooks
|
||||
from anki.models import AllTemplateReqs
|
||||
from anki.sound import AVTag, SoundOrVideoTag, TTSTag
|
||||
from anki.types import assert_impossible_literal
|
||||
|
||||
assert ankirspy.buildhash() == anki.buildinfo.buildhash
|
||||
|
||||
SchedTimingToday = pb.SchedTimingTodayOut
|
||||
|
||||
|
||||
class BackendException(Exception):
|
||||
class Interrupted(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class StringError(Exception):
|
||||
def __str__(self) -> str:
|
||||
err: pb.BackendError = self.args[0] # pylint: disable=unsubscriptable-object
|
||||
kind = err.WhichOneof("value")
|
||||
if kind == "invalid_input":
|
||||
return f"invalid input: {err.invalid_input.info}"
|
||||
elif kind == "template_parse":
|
||||
return err.template_parse.info
|
||||
else:
|
||||
return f"unhandled error: {err}"
return self.args[0] # pylint: disable=unsubscriptable-object


NetworkErrorKind = pb.NetworkError.NetworkErrorKind


class NetworkError(StringError):
def kind(self) -> NetworkErrorKind:
return self.args[1]

def localized(self) -> str:
return self.args[2]


class IOError(StringError):
pass


class DBError(StringError):
pass


class TemplateError(StringError):
pass


SyncErrorKind = pb.SyncError.SyncErrorKind


class SyncError(StringError):
def kind(self) -> SyncErrorKind:
return self.args[1]

def localized(self) -> str:
return self.args[2]


def proto_exception_to_native(err: pb.BackendError) -> Exception:
val = err.WhichOneof("value")
if val == "interrupted":
return Interrupted()
elif val == "network_error":
e = err.network_error
return NetworkError(e.info, e.kind, e.localized)
elif val == "io_error":
return IOError(err.io_error.info)
elif val == "db_error":
return DBError(err.db_error.info)
elif val == "template_parse":
return TemplateError(err.template_parse.info)
elif val == "invalid_input":
return StringError(err.invalid_input.info)
elif val == "sync_error":
e2 = err.sync_error
return SyncError(e2.info, e2.kind, e2.localized)
else:
assert_impossible_literal(val)


def proto_template_reqs_to_legacy(
@ -71,6 +128,36 @@ class TemplateReplacement:
TemplateReplacementList = List[Union[str, TemplateReplacement]]


MediaSyncProgress = pb.MediaSyncProgress

MediaCheckOutput = pb.MediaCheckOut

StringsGroup = pb.StringsGroup


@dataclass
class ExtractedLatex:
filename: str
latex_body: str


@dataclass
class ExtractedLatexOutput:
html: str
latex: List[ExtractedLatex]


class ProgressKind(enum.Enum):
MediaSync = 0
MediaCheck = 1


@dataclass
class Progress:
kind: ProgressKind
val: Union[MediaSyncProgress, str]


def proto_replacement_list_to_native(
nodes: List[pb.RenderedTemplateNode],
) -> TemplateReplacementList:
@ -89,18 +176,45 @@ def proto_replacement_list_to_native(
return results


class RustBackend:
def __init__(self, path: str):
self._backend = ankirspy.Backend(path)
def proto_progress_to_native(progress: pb.Progress) -> Progress:
kind = progress.WhichOneof("value")
if kind == "media_sync":
return Progress(kind=ProgressKind.MediaSync, val=progress.media_sync)
elif kind == "media_check":
return Progress(kind=ProgressKind.MediaCheck, val=progress.media_check)
else:
assert_impossible_literal(kind)

def _run_command(self, input: pb.BackendInput) -> pb.BackendOutput:

class RustBackend:
def __init__(self, col_path: str, media_folder_path: str, media_db_path: str):
ftl_folder = os.path.join(anki.lang.locale_folder, "fluent")
init_msg = pb.BackendInit(
collection_path=col_path,
media_folder_path=media_folder_path,
media_db_path=media_db_path,
locale_folder_path=ftl_folder,
preferred_langs=[anki.lang.currentLang],
)
self._backend = ankirspy.open_backend(init_msg.SerializeToString())
self._backend.set_progress_callback(self._on_progress)

def _on_progress(self, progress_bytes: bytes) -> bool:
progress = pb.Progress()
progress.ParseFromString(progress_bytes)
native_progress = proto_progress_to_native(progress)
return hooks.bg_thread_progress_callback(True, native_progress)

def _run_command(
self, input: pb.BackendInput, release_gil: bool = False
) -> pb.BackendOutput:
input_bytes = input.SerializeToString()
output_bytes = self._backend.command(input_bytes)
output_bytes = self._backend.command(input_bytes, release_gil)
output = pb.BackendOutput()
output.ParseFromString(output_bytes)
kind = output.WhichOneof("value")
if kind == "error":
raise BackendException(output.error)
raise proto_exception_to_native(output.error)
else:
return output

@ -177,7 +291,54 @@ class RustBackend:

return out.text, native_tags

def expand_clozes_to_reveal_latex(self, text: str) -> str:
def extract_latex(self, text: str, svg: bool) -> ExtractedLatexOutput:
out = self._run_command(
pb.BackendInput(extract_latex=pb.ExtractLatexIn(text=text, svg=svg))
).extract_latex

return ExtractedLatexOutput(
html=out.text,
latex=[
ExtractedLatex(filename=l.filename, latex_body=l.latex_body)
for l in out.latex
],
)

def add_file_to_media_folder(self, desired_name: str, data: bytes) -> str:
return self._run_command(
pb.BackendInput(expand_clozes_to_reveal_latex=text)
).expand_clozes_to_reveal_latex
pb.BackendInput(
add_media_file=pb.AddMediaFileIn(desired_name=desired_name, data=data)
)
).add_media_file

def sync_media(self, hkey: str, endpoint: str) -> None:
self._run_command(
pb.BackendInput(sync_media=pb.SyncMediaIn(hkey=hkey, endpoint=endpoint,)),
release_gil=True,
)

def check_media(self) -> MediaCheckOutput:
return self._run_command(
pb.BackendInput(check_media=pb.Empty()), release_gil=True,
).check_media

def trash_media_files(self, fnames: List[str]) -> None:
self._run_command(
pb.BackendInput(trash_media_files=pb.TrashMediaFilesIn(fnames=fnames))
)

def translate(
self, group: pb.StringsGroup, key: str, **kwargs: Union[str, int, float]
):
args = {}
for (k, v) in kwargs.items():
if isinstance(v, str):
args[k] = pb.TranslateArgValue(str=v)
else:
args[k] = pb.TranslateArgValue(number=str(v))

return self._run_command(
pb.BackendInput(
translate_string=pb.TranslateStringIn(group=group, key=key, args=args)
)
).translate_string

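The hunks above replace the old single-path Backend constructor with a BackendInit message and add media and translation commands. As a rough, hedged sketch only (the collection/media paths below are placeholder assumptions, not taken from this diff), the new wrapper might be driven like this:

# Illustrative sketch, assuming the RustBackend and StringsGroup shown above
# are importable from anki.rsbackend in this branch.
from anki.rsbackend import RustBackend, StringsGroup

# Hypothetical paths; in practice the collection supplies these.
backend = RustBackend(
    "/path/to/collection.anki2",
    "/path/to/collection.media",
    "/path/to/collection.media.db2",
)

# Translate a Fluent string through the new translate_string command.
print(backend.translate(StringsGroup.STATISTICS, "due-date"))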
@ -67,28 +67,28 @@ class Scheduler:
self._burySiblings(card)
card.reps += 1
# former is for logging new cards, latter also covers filt. decks
card.wasNew = card.type == 0
wasNewQ = card.queue == 0
card.wasNew = card.type == CARD_TYPE_NEW
wasNewQ = card.queue == QUEUE_TYPE_NEW
if wasNewQ:
# came from the new queue, move to learning
card.queue = 1
card.queue = QUEUE_TYPE_LRN
# if it was a new card, it's now a learning card
if card.type == 0:
card.type = 1
if card.type == CARD_TYPE_NEW:
card.type = CARD_TYPE_LRN
# init reps to graduation
card.left = self._startingLeft(card)
# dynamic?
if card.odid and card.type == 2:
if card.odid and card.type == CARD_TYPE_REV:
if self._resched(card):
# reviews get their ivl boosted on first sight
card.ivl = self._dynIvlBoost(card)
card.odue = self.today + card.ivl
self._updateStats(card, "new")
if card.queue in (1, 3):
if card.queue in (QUEUE_TYPE_LRN, QUEUE_TYPE_DAY_LEARN_RELEARN):
self._answerLrnCard(card, ease)
if not wasNewQ:
self._updateStats(card, "lrn")
elif card.queue == 2:
elif card.queue == QUEUE_TYPE_REV:
self._answerRevCard(card, ease)
self._updateStats(card, "rev")
else:
@ -112,9 +112,9 @@ class Scheduler:
"Return counts over next DAYS. Includes today."
daysd = dict(
self.col.db.all(
"""
f"""
select due, count() from cards
where did in %s and queue = 2
where did in %s and queue = {QUEUE_TYPE_REV}
and due between ? and ?
group by due
order by due"""
@ -132,20 +132,20 @@ order by due"""
return ret

def countIdx(self, card):
if card.queue == 3:
if card.queue == QUEUE_TYPE_DAY_LEARN_RELEARN:
return 1
return card.queue

def answerButtons(self, card):
if card.odue:
# normal review in dyn deck?
if card.odid and card.queue == 2:
if card.odid and card.queue == QUEUE_TYPE_REV:
return 4
conf = self._lrnConf(card)
if card.type in (0, 1) or len(conf["delays"]) > 1:
if card.type in (CARD_TYPE_NEW, CARD_TYPE_LRN) or len(conf["delays"]) > 1:
return 3
return 2
elif card.queue == 2:
elif card.queue == QUEUE_TYPE_REV:
return 4
else:
return 3
@ -153,18 +153,25 @@ order by due"""
def unburyCards(self):
"Unbury cards."
self.col.conf["lastUnburied"] = self.today
self.col.log(self.col.db.list("select id from cards where queue = -2"))
self.col.db.execute("update cards set queue=type where queue = -2")
self.col.log(
self.col.db.list(
f"select id from cards where queue = {QUEUE_TYPE_SIBLING_BURIED}"
)
)
self.col.db.execute(
f"update cards set queue=type where queue = {QUEUE_TYPE_SIBLING_BURIED}"
)

def unburyCardsForDeck(self):
sids = ids2str(self.col.decks.active())
self.col.log(
self.col.db.list(
"select id from cards where queue = -2 and did in %s" % sids
f"select id from cards where queue = {QUEUE_TYPE_SIBLING_BURIED} and did in %s"
% sids
)
)
self.col.db.execute(
"update cards set mod=?,usn=?,queue=type where queue = -2 and did in %s"
f"update cards set mod=?,usn=?,queue=type where queue = {QUEUE_TYPE_SIBLING_BURIED} and did in %s"
% sids,
intTime(),
self.col.usn(),
@ -348,9 +355,9 @@ order by due"""

def _resetNewCount(self):
cntFn = lambda did, lim: self.col.db.scalar(
"""
f"""
select count() from (select 1 from cards where
did = ? and queue = 0 limit ?)""",
did = ? and queue = {QUEUE_TYPE_NEW} limit ?)""",
did,
lim,
)
@ -373,8 +380,8 @@ did = ? and queue = 0 limit ?)""",
if lim:
# fill the queue with the current did
self._newQueue = self.col.db.list(
"""
select id from cards where did = ? and queue = 0 order by due,ord limit ?""",
f"""
select id from cards where did = ? and queue = {QUEUE_TYPE_NEW} order by due,ord limit ?""",
did,
lim,
)
@ -436,9 +443,9 @@ did = ? and queue = 0 limit ?)""",
return 0
lim = min(lim, self.reportLimit)
return self.col.db.scalar(
"""
f"""
select count() from
(select 1 from cards where did = ? and queue = 0 limit ?)""",
(select 1 from cards where did = ? and queue = {QUEUE_TYPE_NEW} limit ?)""",
did,
lim,
)
@ -452,9 +459,9 @@ select count() from

def totalNewForCurrentDeck(self):
return self.col.db.scalar(
"""
f"""
select count() from cards where id in (
select id from cards where did in %s and queue = 0 limit ?)"""
select id from cards where did in %s and queue = {QUEUE_TYPE_NEW} limit ?)"""
% ids2str(self.col.decks.active()),
self.reportLimit,
)
@ -466,9 +473,9 @@ select id from cards where did in %s and queue = 0 limit ?)"""
# sub-day
self.lrnCount = (
self.col.db.scalar(
"""
f"""
select sum(left/1000) from (select left from cards where
did in %s and queue = 1 and due < ? limit %d)"""
did in %s and queue = {QUEUE_TYPE_LRN} and due < ? limit %d)"""
% (self._deckLimit(), self.reportLimit),
self.dayCutoff,
)
@ -476,8 +483,8 @@ did in %s and queue = 1 and due < ? limit %d)"""
)
# day
self.lrnCount += self.col.db.scalar(
"""
select count() from cards where did in %s and queue = 3
f"""
select count() from cards where did in %s and queue = {QUEUE_TYPE_DAY_LEARN_RELEARN}
and due <= ? limit %d"""
% (self._deckLimit(), self.reportLimit),
self.today,
@ -496,9 +503,9 @@ and due <= ? limit %d"""
if self._lrnQueue:
return True
self._lrnQueue = self.col.db.all(
"""
f"""
select due, id from cards where
did in %s and queue = 1 and due < :lim
did in %s and queue = {QUEUE_TYPE_LRN} and due < :lim
limit %d"""
% (self._deckLimit(), self.reportLimit),
lim=self.dayCutoff,
@ -528,9 +535,9 @@ limit %d"""
did = self._lrnDids[0]
# fill the queue with the current did
self._lrnDayQueue = self.col.db.list(
"""
f"""
select id from cards where
did = ? and queue = 3 and due <= ? limit ?""",
did = ? and queue = {QUEUE_TYPE_DAY_LEARN_RELEARN} and due <= ? limit ?""",
did,
self.today,
self.queueLimit,
@ -556,25 +563,25 @@ did = ? and queue = 3 and due <= ? limit ?""",
# ease 1=no, 2=yes, 3=remove
conf = self._lrnConf(card)
if card.odid and not card.wasNew:
type = 3
elif card.type == 2:
type = 2
type = REVLOG_CRAM
elif card.type == CARD_TYPE_REV:
type = REVLOG_RELRN
else:
type = 0
type = REVLOG_LRN
leaving = False
# lrnCount was decremented once when card was fetched
lastLeft = card.left
# immediate graduate?
if ease == 3:
if ease == BUTTON_THREE:
self._rescheduleAsRev(card, conf, True)
leaving = True
# graduation time?
elif ease == 2 and (card.left % 1000) - 1 <= 0:
elif ease == BUTTON_TWO and (card.left % 1000) - 1 <= 0:
self._rescheduleAsRev(card, conf, False)
leaving = True
else:
# one step towards graduation
if ease == 2:
if ease == BUTTON_TWO:
# decrement real left count and recalculate left today
left = (card.left % 1000) - 1
card.left = self._leftToday(conf["delays"], left) * 1000 + left
@ -601,7 +608,7 @@ did = ? and queue = 3 and due <= ? limit ?""",
# if the queue is not empty and there's nothing else to do, make
# sure we don't put it at the head of the queue and end up showing
# it twice in a row
card.queue = 1
card.queue = QUEUE_TYPE_LRN
if self._lrnQueue and not self.revCount and not self.newCount:
smallestDue = self._lrnQueue[0][0]
card.due = max(card.due, smallestDue + 1)
@ -611,7 +618,7 @@ did = ? and queue = 3 and due <= ? limit ?""",
# day learn queue
ahead = ((card.due - self.dayCutoff) // 86400) + 1
card.due = self.today + ahead
card.queue = 3
card.queue = QUEUE_TYPE_DAY_LEARN_RELEARN
self._logLrn(card, ease, conf, leaving, type, lastLeft)

def _delayForGrade(self, conf, left):
@ -627,13 +634,13 @@ did = ? and queue = 3 and due <= ? limit ?""",
return delay * 60

def _lrnConf(self, card):
if card.type == 2:
if card.type == CARD_TYPE_REV:
return self._lapseConf(card)
else:
return self._newConf(card)

def _rescheduleAsRev(self, card, conf, early):
lapse = card.type == 2
lapse = card.type == CARD_TYPE_REV
if lapse:
if self._resched(card):
card.due = max(self.today + 1, card.odue)
@ -642,8 +649,8 @@ did = ? and queue = 3 and due <= ? limit ?""",
card.odue = 0
else:
self._rescheduleNew(card, conf, early)
card.queue = 2
card.type = 2
card.queue = QUEUE_TYPE_REV
card.type = CARD_TYPE_REV
# if we were dynamic, graduating means moving back to the old deck
resched = self._resched(card)
if card.odid:
@ -652,11 +659,11 @@ did = ? and queue = 3 and due <= ? limit ?""",
card.odid = 0
# if rescheduling is off, it needs to be set back to a new card
if not resched and not lapse:
card.queue = card.type = 0
card.queue = card.type = CARD_TYPE_NEW
card.due = self.col.nextID("pos")

def _startingLeft(self, card):
if card.type == 2:
if card.type == CARD_TYPE_REV:
conf = self._lapseConf(card)
else:
conf = self._lrnConf(card)
@ -678,7 +685,7 @@ did = ? and queue = 3 and due <= ? limit ?""",
return ok + 1

def _graduatingIvl(self, card, conf, early, adj=True):
if card.type == 2:
if card.type == CARD_TYPE_REV:
# lapsed card being relearnt
if card.odid:
if conf["resched"]:
@ -736,25 +743,28 @@ did = ? and queue = 3 and due <= ? limit ?""",
extra = " and did in " + ids2str(self.col.decks.allIds())
# review cards in relearning
self.col.db.execute(
"""
f"""
update cards set
due = odue, queue = 2, mod = %d, usn = %d, odue = 0
where queue in (1,3) and type = 2
due = odue, queue = {QUEUE_TYPE_REV}, mod = %d, usn = %d, odue = 0
where queue in ({QUEUE_TYPE_LRN},{QUEUE_TYPE_DAY_LEARN_RELEARN}) and type = {CARD_TYPE_REV}
%s
"""
% (intTime(), self.col.usn(), extra)
)
# new cards in learning
self.forgetCards(
self.col.db.list("select id from cards where queue in (1,3) %s" % extra)
self.col.db.list(
f"select id from cards where queue in ({QUEUE_TYPE_LRN},{QUEUE_TYPE_DAY_LEARN_RELEARN}) %s"
% extra
)
)

def _lrnForDeck(self, did):
cnt = (
self.col.db.scalar(
"""
f"""
select sum(left/1000) from
(select left from cards where did = ? and queue = 1 and due < ? limit ?)""",
(select left from cards where did = ? and queue = {QUEUE_TYPE_LRN} and due < ? limit ?)""",
did,
intTime() + self.col.conf["collapseTime"],
self.reportLimit,
@ -762,9 +772,9 @@ select sum(left/1000) from
or 0
)
return cnt + self.col.db.scalar(
"""
f"""
select count() from
(select 1 from cards where did = ? and queue = 3
(select 1 from cards where did = ? and queue = {QUEUE_TYPE_DAY_LEARN_RELEARN}
and due <= ? limit ?)""",
did,
self.today,
@ -786,9 +796,9 @@ and due <= ? limit ?)""",
def _revForDeck(self, did, lim):
lim = min(lim, self.reportLimit)
return self.col.db.scalar(
"""
f"""
select count() from
(select 1 from cards where did = ? and queue = 2
(select 1 from cards where did = ? and queue = {QUEUE_TYPE_REV}
and due <= ? limit ?)""",
did,
self.today,
@ -798,9 +808,9 @@ and due <= ? limit ?)""",
def _resetRevCount(self):
def cntFn(did, lim):
return self.col.db.scalar(
"""
f"""
select count() from (select id from cards where
did = ? and queue = 2 and due <= ? limit %d)"""
did = ? and queue = {QUEUE_TYPE_REV} and due <= ? limit %d)"""
% lim,
did,
self.today,
@ -824,9 +834,9 @@ did = ? and queue = 2 and due <= ? limit %d)"""
if lim:
# fill the queue with the current did
self._revQueue = self.col.db.list(
"""
f"""
select id from cards where
did = ? and queue = 2 and due <= ? limit ?""",
did = ? and queue = {QUEUE_TYPE_REV} and due <= ? limit ?""",
did,
self.today,
lim,
@ -861,9 +871,9 @@ did = ? and queue = 2 and due <= ? limit ?""",

def totalRevForCurrentDeck(self):
return self.col.db.scalar(
"""
f"""
select count() from cards where id in (
select id from cards where did in %s and queue = 2 and due <= ? limit ?)"""
select id from cards where did in %s and queue = {QUEUE_TYPE_REV} and due <= ? limit ?)"""
% ids2str(self.col.decks.active()),
self.today,
self.reportLimit,
@ -874,7 +884,7 @@ select id from cards where did in %s and queue = 2 and due <= ? limit ?)"""

def _answerRevCard(self, card, ease):
delay = 0
if ease == 1:
if ease == BUTTON_ONE:
delay = self._rescheduleLapse(card)
else:
self._rescheduleRev(card, ease)
@ -893,7 +903,7 @@ select id from cards where did in %s and queue = 2 and due <= ? limit ?)"""
card.odue = card.due
# if suspended as a leech, nothing to do
delay = 0
if self._checkLeech(card, conf) and card.queue == -1:
if self._checkLeech(card, conf) and card.queue == QUEUE_TYPE_SUSPENDED:
return delay
# if no relearning steps, nothing to do
if not conf["delays"]:
@ -907,13 +917,13 @@ select id from cards where did in %s and queue = 2 and due <= ? limit ?)"""
# queue 1
if card.due < self.dayCutoff:
self.lrnCount += card.left // 1000
card.queue = 1
card.queue = QUEUE_TYPE_LRN
heappush(self._lrnQueue, (card.due, card.id))
else:
# day learn queue
ahead = ((card.due - self.dayCutoff) // 86400) + 1
card.due = self.today + ahead
card.queue = 3
card.queue = QUEUE_TYPE_DAY_LEARN_RELEARN
return delay

def _nextLapseIvl(self, card, conf):
@ -946,7 +956,7 @@ select id from cards where did in %s and queue = 2 and due <= ? limit ?)"""
card.lastIvl,
card.factor,
card.timeTaken(),
1,
REVLOG_REV,
)

try:
@ -969,11 +979,11 @@ select id from cards where did in %s and queue = 2 and due <= ? limit ?)"""
ivl4 = self._constrainedIvl(
(card.ivl + delay) * fct * conf["ease4"], conf, ivl3
)
if ease == 2:
if ease == BUTTON_TWO:
interval = ivl2
elif ease == 3:
elif ease == BUTTON_THREE:
interval = ivl3
elif ease == 4:
elif ease == BUTTON_FOUR:
interval = ivl4
# interval capped?
return min(interval, conf["maxIvl"])
@ -1058,9 +1068,9 @@ select id from cards where did in %s and queue = 2 and due <= ? limit ?)"""
self.col.log(self.col.db.list("select id from cards where %s" % lim))
# move out of cram queue
self.col.db.execute(
"""
update cards set did = odid, queue = (case when type = 1 then 0
else type end), type = (case when type = 1 then 0 else type end),
f"""
update cards set did = odid, queue = (case when type = {CARD_TYPE_LRN} then {QUEUE_TYPE_NEW}
else type end), type = (case when type = {CARD_TYPE_LRN} then {CARD_TYPE_NEW} else type end),
due = odue, odue = 0, odid = 0, usn = ? where %s"""
% lim,
self.col.usn(),
@ -1088,7 +1098,7 @@ due = odue, odue = 0, odid = 0, usn = ? where %s"""
t = "c.due"
elif o == DYN_DUEPRIORITY:
t = (
"(case when queue=2 and due <= %d then (ivl / cast(%d-due+0.001 as real)) else 100000+due end)"
f"(case when queue={QUEUE_TYPE_REV} and due <= %d then (ivl / cast(%d-due+0.001 as real)) else 100000+due end)"
% (self.today, self.today)
)
else:
@ -1106,9 +1116,9 @@ due = odue, odue = 0, odid = 0, usn = ? where %s"""
data.append((did, -100000 + c, u, id))
# due reviews stay in the review queue. careful: can't use
# "odid or did", as sqlite converts to boolean
queue = """
(case when type=2 and (case when odue then odue <= %d else due <= %d end)
then 2 else 0 end)"""
queue = f"""
(case when type={CARD_TYPE_REV} and (case when odue then odue <= %d else due <= %d end)
then {QUEUE_TYPE_REV} else {QUEUE_TYPE_NEW} end)"""
queue %= (self.today, self.today)
self.col.db.executemany(
"""
@ -1121,7 +1131,7 @@ did = ?, queue = %s, due = ?, usn = ? where id = ?"""
)

def _dynIvlBoost(self, card):
assert card.odid and card.type == 2
assert card.odid and card.type == CARD_TYPE_REV
assert card.factor
elapsed = card.ivl - (card.odue - self.today)
factor = ((card.factor / 1000) + 1.2) / 2
@ -1145,14 +1155,14 @@ did = ?, queue = %s, due = ?, usn = ? where id = ?"""
f.flush()
# handle
a = conf["leechAction"]
if a == 0:
if a == LEECH_SUSPEND:
# if it has an old due, remove it from cram/relearning
if card.odue:
card.due = card.odue
if card.odid:
card.did = card.odid
card.odue = card.odid = 0
card.queue = -1
card.queue = QUEUE_TYPE_SUSPENDED
# notify UI
hooks.card_did_leech(card)
return True
@ -1311,7 +1321,7 @@ To study outside of the normal schedule, click the Custom Study button below."""
"True if there are any rev cards due."
return self.col.db.scalar(
(
"select 1 from cards where did in %s and queue = 2 "
f"select 1 from cards where did in %s and queue = {QUEUE_TYPE_REV} "
"and due <= ? limit 1"
)
% self._deckLimit(),
@ -1321,14 +1331,18 @@ To study outside of the normal schedule, click the Custom Study button below."""
def newDue(self):
"True if there are any new cards due."
return self.col.db.scalar(
("select 1 from cards where did in %s and queue = 0 " "limit 1")
(
f"select 1 from cards where did in %s and queue = {QUEUE_TYPE_NEW} "
"limit 1"
)
% self._deckLimit()
)

def haveBuried(self):
sdids = ids2str(self.col.decks.active())
cnt = self.col.db.scalar(
"select 1 from cards where queue = -2 and did in %s limit 1" % sdids
f"select 1 from cards where queue = {QUEUE_TYPE_SIBLING_BURIED} and did in %s limit 1"
% sdids
)
return not not cnt

@ -1347,9 +1361,9 @@ To study outside of the normal schedule, click the Custom Study button below."""

def nextIvl(self, card, ease):
"Return the next interval for CARD, in seconds."
if card.queue in (0, 1, 3):
if card.queue in (QUEUE_TYPE_NEW, QUEUE_TYPE_LRN, QUEUE_TYPE_DAY_LEARN_RELEARN):
return self._nextLrnIvl(card, ease)
elif ease == 1:
elif ease == BUTTON_ONE:
# lapsed
conf = self._lapseConf(card)
if conf["delays"]:
@ -1364,10 +1378,10 @@ To study outside of the normal schedule, click the Custom Study button below."""
if card.queue == 0:
card.left = self._startingLeft(card)
conf = self._lrnConf(card)
if ease == 1:
if ease == BUTTON_ONE:
# fail
return self._delayForGrade(conf, len(conf["delays"]))
elif ease == 3:
elif ease == BUTTON_THREE:
# early removal
if not self._resched(card):
return 0
@ -1391,7 +1405,8 @@ To study outside of the normal schedule, click the Custom Study button below."""
self.remFromDyn(ids)
self.removeLrn(ids)
self.col.db.execute(
"update cards set queue=-1,mod=?,usn=? where id in " + ids2str(ids),
f"update cards set queue={QUEUE_TYPE_SUSPENDED},mod=?,usn=? where id in "
+ ids2str(ids),
intTime(),
self.col.usn(),
)
@ -1401,7 +1416,7 @@ To study outside of the normal schedule, click the Custom Study button below."""
self.col.log(ids)
self.col.db.execute(
"update cards set queue=type,mod=?,usn=? "
"where queue = -1 and id in " + ids2str(ids),
f"where queue = {QUEUE_TYPE_SUSPENDED} and id in " + ids2str(ids),
intTime(),
self.col.usn(),
)
@ -1411,8 +1426,8 @@ To study outside of the normal schedule, click the Custom Study button below."""
self.remFromDyn(cids)
self.removeLrn(cids)
self.col.db.execute(
"""
update cards set queue=-2,mod=?,usn=? where id in """
f"""
update cards set queue={QUEUE_TYPE_SIBLING_BURIED},mod=?,usn=? where id in """
+ ids2str(cids),
intTime(),
self.col.usn(),
@ -1436,14 +1451,14 @@ update cards set queue=-2,mod=?,usn=? where id in """
buryRev = rconf.get("bury", True)
# loop through and remove from queues
for cid, queue in self.col.db.execute(
"""
f"""
select id, queue from cards where nid=? and id!=?
and (queue=0 or (queue=2 and due<=?))""",
and (queue={QUEUE_TYPE_NEW} or (queue={QUEUE_TYPE_REV} and due<=?))""",
card.nid,
card.id,
self.today,
):
if queue == 2:
if queue == QUEUE_TYPE_REV:
if buryRev:
toBury.append(cid)
# if bury disabled, we still discard to give same-day spacing
@ -1462,7 +1477,8 @@ and (queue=0 or (queue=2 and due<=?))""",
# then bury
if toBury:
self.col.db.execute(
"update cards set queue=-2,mod=?,usn=? where id in " + ids2str(toBury),
f"update cards set queue={QUEUE_TYPE_SIBLING_BURIED},mod=?,usn=? where id in "
+ ids2str(toBury),
intTime(),
self.col.usn(),
)
@ -1475,11 +1491,14 @@ and (queue=0 or (queue=2 and due<=?))""",
"Put cards at the end of the new queue."
self.remFromDyn(ids)
self.col.db.execute(
"update cards set type=0,queue=0,ivl=0,due=0,odue=0,factor=?"
f"update cards set type={CARD_TYPE_NEW},queue={QUEUE_TYPE_NEW},ivl=0,due=0,odue=0,factor=?"
" where id in " + ids2str(ids),
STARTING_FACTOR,
)
pmax = self.col.db.scalar("select max(due) from cards where type=0") or 0
pmax = (
self.col.db.scalar(f"select max(due) from cards where type={CARD_TYPE_NEW}")
or 0
)
# takes care of mod + usn
self.sortCards(ids, start=pmax + 1)
self.col.log(ids)
@ -1503,8 +1522,8 @@ and (queue=0 or (queue=2 and due<=?))""",
)
self.remFromDyn(ids)
self.col.db.executemany(
"""
update cards set type=2,queue=2,ivl=:ivl,due=:due,odue=0,
f"""
update cards set type={CARD_TYPE_REV},queue={QUEUE_TYPE_REV},ivl=:ivl,due=:due,odue=0,
usn=:usn,mod=:mod,factor=:fact where id=:id""",
d,
)
@ -1515,11 +1534,12 @@ usn=:usn,mod=:mod,factor=:fact where id=:id""",
sids = ids2str(ids)
# we want to avoid resetting due number of existing new cards on export
nonNew = self.col.db.list(
"select id from cards where id in %s and (queue != 0 or type != 0)" % sids
f"select id from cards where id in %s and (queue != {QUEUE_TYPE_NEW} or type != {CARD_TYPE_NEW})"
% sids
)
# reset all cards
self.col.db.execute(
"update cards set reps=0,lapses=0,odid=0,odue=0,queue=0"
f"update cards set reps=0,lapses=0,odid=0,odue=0,queue={QUEUE_TYPE_NEW}"
" where id in %s" % sids
)
# and forget any non-new cards, changing their due numbers
@ -1553,16 +1573,16 @@ usn=:usn,mod=:mod,factor=:fact where id=:id""",
# shift?
if shift:
low = self.col.db.scalar(
"select min(due) from cards where due >= ? and type = 0 "
f"select min(due) from cards where due >= ? and type = {CARD_TYPE_NEW} "
"and id not in %s" % scids,
start,
)
if low is not None:
shiftby = high - low + 1
self.col.db.execute(
"""
f"""
update cards set mod=?, usn=?, due=due+? where id not in %s
and due >= ? and queue = 0"""
and due >= ? and queue = {QUEUE_TYPE_NEW}"""
% scids,
now,
self.col.usn(),
@ -1572,7 +1592,7 @@ and due >= ? and queue = 0"""
# reorder cards
d = []
for id, nid in self.col.db.execute(
"select id, nid from cards where type = 0 and id in " + scids
f"select id, nid from cards where type = {CARD_TYPE_NEW} and id in " + scids
):
d.append(dict(now=now, due=due[nid], usn=self.col.usn(), cid=id))
self.col.db.executemany(

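For reference while reading the schedv2.py hunks below, the named constants substituted for the old literals map back to the previous values as follows. This is a reconstruction from the replacements in this diff (e.g. "queue = 2" becoming QUEUE_TYPE_REV), shown as a small Python sketch rather than quoted from consts.py:

# Reconstructed literal -> constant mapping implied by these hunks.
CARD_TYPE_NEW, CARD_TYPE_LRN, CARD_TYPE_REV, CARD_TYPE_RELEARNING = 0, 1, 2, 3
QUEUE_TYPE_MANUALLY_BURIED = -3
QUEUE_TYPE_SIBLING_BURIED = -2
QUEUE_TYPE_SUSPENDED = -1
QUEUE_TYPE_NEW, QUEUE_TYPE_LRN, QUEUE_TYPE_REV = 0, 1, 2
QUEUE_TYPE_DAY_LEARN_RELEARN = 3
QUEUE_TYPE_PREVIEW = 4
REVLOG_LRN, REVLOG_REV, REVLOG_RELRN, REVLOG_CRAM = 0, 1, 2, 3
BUTTON_ONE, BUTTON_TWO, BUTTON_THREE, BUTTON_FOUR = 1, 2, 3, 4
LEECH_SUSPEND = 0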
@ -22,13 +22,9 @@ from anki.rsbackend import SchedTimingToday
from anki.utils import fmtTimeSpan, ids2str, intTime

# card types: 0=new, 1=lrn, 2=rev, 3=relrn
CARD_TYPE_RELEARNING = 3
# queue types: 0=new, 1=(re)lrn, 2=rev, 3=day (re)lrn,
# 4=preview, -1=suspended, -2=sibling buried, -3=manually buried
QUEUE_TYPE_PREVIEW = 4
QUEUE_TYPE_DAY_LEARN_RELEARN = 3
QUEUE_TYPE_SIBLING_BURIED = -2
QUEUE_TYPE_MANUALLY_BURIED = -3

# revlog types: 0=lrn, 1=rev, 2=relrn, 3=early review
# positive revlog intervals are in days (rev), negative in seconds (lrn)
# odue/odid store original due/did when cards moved to filtered deck
@ -95,18 +91,18 @@ class Scheduler:

card.reps += 1

if card.queue == 0:
if card.queue == QUEUE_TYPE_NEW:
# came from the new queue, move to learning
card.queue = 1
card.type = 1
card.queue = QUEUE_TYPE_LRN
card.type = CARD_TYPE_LRN
# init reps to graduation
card.left = self._startingLeft(card)
# update daily limit
self._updateStats(card, "new")

if card.queue in (1, QUEUE_TYPE_DAY_LEARN_RELEARN):
if card.queue in (QUEUE_TYPE_LRN, QUEUE_TYPE_DAY_LEARN_RELEARN):
self._answerLrnCard(card, ease)
elif card.queue == 2:
elif card.queue == QUEUE_TYPE_REV:
self._answerRevCard(card, ease)
# update daily limit
self._updateStats(card, "rev")
@ -121,12 +117,13 @@ class Scheduler:
def _answerCardPreview(self, card: Card, ease: int) -> None:
assert 1 <= ease <= 2

if ease == 1:
if ease == BUTTON_ONE:
# repeat after delay
card.queue = QUEUE_TYPE_PREVIEW
card.due = intTime() + self._previewDelay(card)
self.lrnCount += 1
else:
# BUTTON_TWO
# restore original card state and remove from filtered deck
self._restorePreviewCard(card)
self._removeFromFiltered(card)
@ -142,9 +139,9 @@ class Scheduler:
"Return counts over next DAYS. Includes today."
daysd = dict(
self.col.db.all(
"""
f"""
select due, count() from cards
where did in %s and queue = 2
where did in %s and queue = {QUEUE_TYPE_REV}
and due between ? and ?
group by due
order by due"""
@ -368,9 +365,9 @@ order by due"""

def _resetNewCount(self) -> None:
cntFn = lambda did, lim: self.col.db.scalar(
"""
f"""
select count() from (select 1 from cards where
did = ? and queue = 0 limit ?)""",
did = ? and queue = {QUEUE_TYPE_NEW} limit ?)""",
did,
lim,
)
@ -393,8 +390,8 @@ did = ? and queue = 0 limit ?)""",
if lim:
# fill the queue with the current did
self._newQueue = self.col.db.list(
"""
select id from cards where did = ? and queue = 0 order by due,ord limit ?""",
f"""
select id from cards where did = ? and queue = {QUEUE_TYPE_NEW} order by due,ord limit ?""",
did,
lim,
)
@ -462,9 +459,9 @@ did = ? and queue = 0 limit ?)""",
return 0
lim = min(lim, self.reportLimit)
return self.col.db.scalar(
"""
f"""
select count() from
(select 1 from cards where did = ? and queue = 0 limit ?)""",
(select 1 from cards where did = ? and queue = {QUEUE_TYPE_NEW} limit ?)""",
did,
lim,
)
@ -478,9 +475,9 @@ select count() from

def totalNewForCurrentDeck(self) -> Any:
return self.col.db.scalar(
"""
f"""
select count() from cards where id in (
select id from cards where did in %s and queue = 0 limit ?)"""
select id from cards where did in %s and queue = {QUEUE_TYPE_NEW} limit ?)"""
% self._deckLimit(),
self.reportLimit,
)
@ -504,8 +501,8 @@ select id from cards where did in %s and queue = 0 limit ?)"""
# sub-day
self.lrnCount = (
self.col.db.scalar(
"""
select count() from cards where did in %s and queue = 1
f"""
select count() from cards where did in %s and queue = {QUEUE_TYPE_LRN}
and due < ?"""
% (self._deckLimit()),
self._lrnCutoff,
@ -545,7 +542,7 @@ select count() from cards where did in %s and queue = {QUEUE_TYPE_PREVIEW}
self._lrnQueue = self.col.db.all(
f"""
select due, id from cards where
did in %s and queue in (1,{QUEUE_TYPE_PREVIEW}) and due < :lim
did in %s and queue in ({QUEUE_TYPE_LRN},{QUEUE_TYPE_PREVIEW}) and due < :lim
limit %d"""
% (self._deckLimit(), self.reportLimit),
lim=cutoff,
@ -606,28 +603,28 @@ did = ? and queue = {QUEUE_TYPE_DAY_LEARN_RELEARN} and due <= ? limit ?""",

def _answerLrnCard(self, card: Card, ease: int) -> None:
conf = self._lrnConf(card)
if card.type in (2, CARD_TYPE_RELEARNING):
type = 2
if card.type in (CARD_TYPE_REV, CARD_TYPE_RELEARNING):
type = REVLOG_RELRN
else:
type = 0
type = REVLOG_LRN
# lrnCount was decremented once when card was fetched
lastLeft = card.left

leaving = False

# immediate graduate?
if ease == 4:
if ease == BUTTON_FOUR:
self._rescheduleAsRev(card, conf, True)
leaving = True
# next step?
elif ease == 3:
elif ease == BUTTON_THREE:
# graduation time?
if (card.left % 1000) - 1 <= 0:
self._rescheduleAsRev(card, conf, False)
leaving = True
else:
self._moveToNextStep(card, conf)
elif ease == 2:
elif ease == BUTTON_TWO:
self._repeatStep(card, conf)
else:
# back to first step
@ -671,9 +668,9 @@ did = ? and queue = {QUEUE_TYPE_DAY_LEARN_RELEARN} and due <= ? limit ?""",
if card.due < self.dayCutoff:
# add some randomness, up to 5 minutes or 25%
maxExtra = min(300, int(delay * 0.25))
fuzz = random.randrange(0, maxExtra)
fuzz = random.randrange(0, max(1, maxExtra))
card.due = min(self.dayCutoff - 1, card.due + fuzz)
card.queue = 1
card.queue = QUEUE_TYPE_LRN
if card.due < (intTime() + self.col.conf["collapseTime"]):
self.lrnCount += 1
# if the queue is not empty and there's nothing else to do, make
@ -714,13 +711,13 @@ did = ? and queue = {QUEUE_TYPE_DAY_LEARN_RELEARN} and due <= ? limit ?""",
return avg

def _lrnConf(self, card: Card) -> Any:
if card.type in (2, CARD_TYPE_RELEARNING):
if card.type in (CARD_TYPE_REV, CARD_TYPE_RELEARNING):
return self._lapseConf(card)
else:
return self._newConf(card)

def _rescheduleAsRev(self, card: Card, conf: Dict[str, Any], early: bool) -> None:
lapse = card.type in (2, CARD_TYPE_RELEARNING)
lapse = card.type in (CARD_TYPE_REV, CARD_TYPE_RELEARNING)

if lapse:
self._rescheduleGraduatingLapse(card, early)
@ -735,8 +732,8 @@ did = ? and queue = {QUEUE_TYPE_DAY_LEARN_RELEARN} and due <= ? limit ?""",
if early:
card.ivl += 1
card.due = self.today + card.ivl
card.queue = 2
card.type = 2
card.queue = QUEUE_TYPE_REV
card.type = CARD_TYPE_REV

def _startingLeft(self, card: Card) -> int:
if card.type == CARD_TYPE_RELEARNING:
@ -768,7 +765,7 @@ did = ? and queue = {QUEUE_TYPE_DAY_LEARN_RELEARN} and due <= ? limit ?""",
def _graduatingIvl(
self, card: Card, conf: Dict[str, Any], early: bool, fuzz: bool = True
) -> Any:
if card.type in (2, CARD_TYPE_RELEARNING):
if card.type in (CARD_TYPE_REV, CARD_TYPE_RELEARNING):
bonus = early and 1 or 0
return card.ivl + bonus
if not early:
@ -786,7 +783,7 @@ did = ? and queue = {QUEUE_TYPE_DAY_LEARN_RELEARN} and due <= ? limit ?""",
card.ivl = self._graduatingIvl(card, conf, early)
card.due = self.today + card.ivl
card.factor = conf["initialFactor"]
card.type = card.queue = 2
card.type = card.queue = QUEUE_TYPE_REV

def _logLrn(
self,
@ -801,7 +798,7 @@ did = ? and queue = {QUEUE_TYPE_DAY_LEARN_RELEARN} and due <= ? limit ?""",
if leaving:
ivl = card.ivl
else:
if ease == 2:
if ease == BUTTON_TWO:
ivl = -self._delayForRepeatingGrade(conf, card.left)
else:
ivl = -self._delayForGrade(conf, card.left)
@ -830,9 +827,9 @@ did = ? and queue = {QUEUE_TYPE_DAY_LEARN_RELEARN} and due <= ? limit ?""",
def _lrnForDeck(self, did: int) -> Any:
cnt = (
self.col.db.scalar(
"""
f"""
select count() from
(select null from cards where did = ? and queue = 1 and due < ? limit ?)""",
(select null from cards where did = ? and queue = {QUEUE_TYPE_LRN} and due < ? limit ?)""",
did,
intTime() + self.col.conf["collapseTime"],
self.reportLimit,
@ -883,9 +880,9 @@ and due <= ? limit ?)""",
dids = [did] + self.col.decks.childDids(did, childMap)
lim = min(lim, self.reportLimit)
return self.col.db.scalar(
"""
f"""
select count() from
(select 1 from cards where did in %s and queue = 2
(select 1 from cards where did in %s and queue = {QUEUE_TYPE_REV}
and due <= ? limit ?)"""
% ids2str(dids),
self.today,
@ -895,9 +892,9 @@ and due <= ? limit ?)"""
def _resetRevCount(self) -> None:
lim = self._currentRevLimit()
self.revCount = self.col.db.scalar(
"""
f"""
select count() from (select id from cards where
did in %s and queue = 2 and due <= ? limit ?)"""
did in %s and queue = {QUEUE_TYPE_REV} and due <= ? limit ?)"""
% self._deckLimit(),
self.today,
lim,
@ -916,9 +913,9 @@ did in %s and queue = 2 and due <= ? limit ?)"""
lim = min(self.queueLimit, self._currentRevLimit())
if lim:
self._revQueue = self.col.db.list(
"""
f"""
select id from cards where
did in %s and queue = 2 and due <= ?
did in %s and queue = {QUEUE_TYPE_REV} and due <= ?
order by due, random()
limit ?"""
% self._deckLimit(),
@ -946,9 +943,9 @@ limit ?"""

def totalRevForCurrentDeck(self) -> int:
return self.col.db.scalar(
"""
f"""
select count() from cards where id in (
select id from cards where did in %s and queue = 2 and due <= ? limit ?)"""
select id from cards where did in %s and queue = {QUEUE_TYPE_REV} and due <= ? limit ?)"""
% self._deckLimit(),
self.today,
self.reportLimit,
@ -960,9 +957,9 @@ select id from cards where did in %s and queue = 2 and due <= ? limit ?)"""
def _answerRevCard(self, card: Card, ease: int) -> None:
delay = 0
early = bool(card.odid and (card.odue > self.today))
type = early and 3 or 1
type = early and REVLOG_CRAM or REVLOG_REV

if ease == 1:
if ease == BUTTON_ONE:
delay = self._rescheduleLapse(card)
else:
self._rescheduleRev(card, ease, early)
@ -976,7 +973,7 @@ select id from cards where did in %s and queue = 2 and due <= ? limit ?)"""
card.lapses += 1
card.factor = max(1300, card.factor - 200)

suspended = self._checkLeech(card, conf) and card.queue == -1
suspended = self._checkLeech(card, conf) and card.queue == QUEUE_TYPE_SUSPENDED

if conf["delays"] and not suspended:
card.type = CARD_TYPE_RELEARNING
@ -987,7 +984,7 @@ select id from cards where did in %s and queue = 2 and due <= ? limit ?)"""
self._rescheduleAsRev(card, conf, early=False)
# need to reset the queue after rescheduling
if suspended:
card.queue = -1
card.queue = QUEUE_TYPE_SUSPENDED
delay = 0

return delay
@ -1047,11 +1044,11 @@ select id from cards where did in %s and queue = 2 and due <= ? limit ?)"""
else:
hardMin = 0
ivl2 = self._constrainedIvl(card.ivl * hardFactor, conf, hardMin, fuzz)
if ease == 2:
if ease == BUTTON_TWO:
return ivl2

ivl3 = self._constrainedIvl((card.ivl + delay // 2) * fct, conf, ivl2, fuzz)
if ease == 3:
if ease == BUTTON_THREE:
return ivl3

ivl4 = self._constrainedIvl(
@ -1101,7 +1098,7 @@ select id from cards where did in %s and queue = 2 and due <= ? limit ?)"""

# next interval for card when answered early+correctly
def _earlyReviewIvl(self, card: Card, ease: int) -> int:
assert card.odid and card.type == 2
assert card.odid and card.type == CARD_TYPE_REV
assert card.factor
assert ease > 1

@ -1113,14 +1110,14 @@ select id from cards where did in %s and queue = 2 and due <= ? limit ?)"""
# early 3/4 reviews shouldn't decrease previous interval
minNewIvl = 1

if ease == 2:
if ease == BUTTON_TWO:
factor = conf.get("hardFactor", 1.2)
# hard cards shouldn't have their interval decreased by more than 50%
# of the normal factor
minNewIvl = factor / 2
elif ease == 3:
elif ease == BUTTON_THREE:
factor = card.factor / 1000
else: # ease == 4:
else: # ease == BUTTON_FOUR:
factor = card.factor / 1000
ease4 = conf["ease4"]
# 1.3 -> 1.15
@ -1213,7 +1210,7 @@ due = (case when odue>0 then odue else due end), odue = 0, odid = 0, usn = ? whe
t = "n.id desc"
elif o == DYN_DUEPRIORITY:
t = (
"(case when queue=2 and due <= %d then (ivl / cast(%d-due+0.001 as real)) else 100000+due end)"
f"(case when queue={QUEUE_TYPE_REV} and due <= %d then (ivl / cast(%d-due+0.001 as real)) else 100000+due end)"
% (self.today, self.today)
)
else: # DYN_DUE or unknown
@ -1231,7 +1228,7 @@ due = (case when odue>0 then odue else due end), odue = 0, odid = 0, usn = ? whe

queue = ""
if not deck["resched"]:
queue = ",queue=2"
queue = f",queue={QUEUE_TYPE_REV}"

query = (
"""
@ -1260,9 +1257,9 @@ where id = ?

# learning and relearning cards may be seconds-based or day-based;
# other types map directly to queues
if card.type in (1, CARD_TYPE_RELEARNING):
if card.type in (CARD_TYPE_LRN, CARD_TYPE_RELEARNING):
if card.odue > 1000000000:
card.queue = 1
card.queue = QUEUE_TYPE_LRN
else:
card.queue = QUEUE_TYPE_DAY_LEARN_RELEARN
else:
@ -1284,8 +1281,8 @@ where id = ?
f.flush()
# handle
a = conf["leechAction"]
if a == 0:
card.queue = -1
if a == LEECH_SUSPEND:
card.queue = QUEUE_TYPE_SUSPENDED
# notify UI
hooks.card_did_leech(card)
return True
@ -1509,7 +1506,7 @@ To study outside of the normal schedule, click the Custom Study button below."""
"True if there are any rev cards due."
return self.col.db.scalar(
(
"select 1 from cards where did in %s and queue = 2 "
f"select 1 from cards where did in %s and queue = {QUEUE_TYPE_REV} "
"and due <= ? limit 1"
)
% self._deckLimit(),
@ -1519,7 +1516,10 @@ To study outside of the normal schedule, click the Custom Study button below."""
def newDue(self) -> Any:
"True if there are any new cards due."
return self.col.db.scalar(
("select 1 from cards where did in %s and queue = 0 " "limit 1")
(
f"select 1 from cards where did in %s and queue = {QUEUE_TYPE_NEW} "
"limit 1"
)
% self._deckLimit()
)

@ -1557,14 +1557,14 @@ To study outside of the normal schedule, click the Custom Study button below."""
"Return the next interval for CARD, in seconds."
# preview mode?
if self._previewingCard(card):
if ease == 1:
if ease == BUTTON_ONE:
return self._previewDelay(card)
return 0

# (re)learning?
if card.queue in (0, 1, QUEUE_TYPE_DAY_LEARN_RELEARN):
if card.queue in (QUEUE_TYPE_NEW, QUEUE_TYPE_LRN, QUEUE_TYPE_DAY_LEARN_RELEARN):
return self._nextLrnIvl(card, ease)
elif ease == 1:
elif ease == BUTTON_ONE:
# lapse
conf = self._lapseConf(card)
if conf["delays"]:
@ -1580,17 +1580,17 @@ To study outside of the normal schedule, click the Custom Study button below."""

# this isn't easily extracted from the learn code
def _nextLrnIvl(self, card: Card, ease: int) -> Any:
if card.queue == 0:
if card.queue == QUEUE_TYPE_NEW:
card.left = self._startingLeft(card)
conf = self._lrnConf(card)
if ease == 1:
if ease == BUTTON_ONE:
# fail
return self._delayForGrade(conf, len(conf["delays"]))
elif ease == 2:
elif ease == BUTTON_TWO:
return self._delayForRepeatingGrade(conf, card.left)
elif ease == 4:
elif ease == BUTTON_FOUR:
return self._graduatingIvl(card, conf, True, fuzz=False) * 86400
else: # ease == 3
else: # ease == BUTTON_THREE
left = card.left % 1000 - 1
if left <= 0:
# graduate
@ -1604,7 +1604,7 @@ To study outside of the normal schedule, click the Custom Study button below."""
# learning and relearning cards may be seconds-based or day-based;
# other types map directly to queues
_restoreQueueSnippet = f"""
queue = (case when type in (1,{CARD_TYPE_RELEARNING}) then
queue = (case when type in ({CARD_TYPE_LRN},{CARD_TYPE_RELEARNING}) then
(case when (case when odue then odue else due end) > 1000000000 then 1 else
{QUEUE_TYPE_DAY_LEARN_RELEARN} end)
else
@ -1616,7 +1616,8 @@ end)
"Suspend cards."
self.col.log(ids)
self.col.db.execute(
"update cards set queue=-1,mod=?,usn=? where id in " + ids2str(ids),
f"update cards set queue={QUEUE_TYPE_SUSPENDED},mod=?,usn=? where id in "
+ ids2str(ids),
intTime(),
self.col.usn(),
)
@ -1625,7 +1626,9 @@ end)
"Unsuspend cards."
self.col.log(ids)
self.col.db.execute(
("update cards set %s,mod=?,usn=? " "where queue = -1 and id in %s")
(
f"update cards set %s,mod=?,usn=? where queue = {QUEUE_TYPE_SUSPENDED} and id in %s"
)
% (self._restoreQueueSnippet, ids2str(ids)),
intTime(),
self.col.usn(),
@ -1646,7 +1649,7 @@ update cards set queue=?,mod=?,usn=? where id in """
def buryNote(self, nid) -> None:
"Bury all cards for note until next session."
cids = self.col.db.list(
"select id from cards where nid = ? and queue >= 0", nid
f"select id from cards where nid = ? and queue >= {QUEUE_TYPE_NEW}", nid
)
self.buryCards(cids)

@ -1654,11 +1657,11 @@ update cards set queue=?,mod=?,usn=? where id in """
"Unbury all buried cards in all decks."
self.col.log(
self.col.db.list(
f"select id from cards where queue in (-2, {QUEUE_TYPE_MANUALLY_BURIED})"
f"select id from cards where queue in ({QUEUE_TYPE_SIBLING_BURIED}, {QUEUE_TYPE_MANUALLY_BURIED})"
)
)
self.col.db.execute(
f"update cards set %s where queue in (-2, {QUEUE_TYPE_MANUALLY_BURIED})"
f"update cards set %s where queue in ({QUEUE_TYPE_SIBLING_BURIED}, {QUEUE_TYPE_MANUALLY_BURIED})"
% self._restoreQueueSnippet
)

@ -1698,14 +1701,14 @@ update cards set queue=?,mod=?,usn=? where id in """
buryRev = rconf.get("bury", True)
# loop through and remove from queues
for cid, queue in self.col.db.execute(
"""
f"""
select id, queue from cards where nid=? and id!=?
and (queue=0 or (queue=2 and due<=?))""",
and (queue={QUEUE_TYPE_NEW} or (queue={QUEUE_TYPE_REV} and due<=?))""",
card.nid,
card.id,
self.today,
):
if queue == 2:
if queue == QUEUE_TYPE_REV:
if buryRev:
toBury.append(cid)
# if bury disabled, we still discard to give same-day spacing
@ -1732,11 +1735,14 @@ and (queue=0 or (queue=2 and due<=?))""",
"Put cards at the end of the new queue."
self.remFromDyn(ids)
self.col.db.execute(
"update cards set type=0,queue=0,ivl=0,due=0,odue=0,factor=?"
f"update cards set type={CARD_TYPE_NEW},queue={QUEUE_TYPE_NEW},ivl=0,due=0,odue=0,factor=?"
" where id in " + ids2str(ids),
STARTING_FACTOR,
)
pmax = self.col.db.scalar("select max(due) from cards where type=0") or 0
pmax = (
self.col.db.scalar(f"select max(due) from cards where type={CARD_TYPE_NEW}")
or 0
)
# takes care of mod + usn
self.sortCards(ids, start=pmax + 1)
self.col.log(ids)
@ -1760,8 +1766,8 @@ and (queue=0 or (queue=2 and due<=?))""",
)
self.remFromDyn(ids)
self.col.db.executemany(
"""
update cards set type=2,queue=2,ivl=:ivl,due=:due,odue=0,
f"""
update cards set type={CARD_TYPE_REV},queue={QUEUE_TYPE_REV},ivl=:ivl,due=:due,odue=0,
usn=:usn,mod=:mod,factor=:fact where id=:id""",
d,
)
@ -1772,11 +1778,12 @@ usn=:usn,mod=:mod,factor=:fact where id=:id""",
sids = ids2str(ids)
# we want to avoid resetting due number of existing new cards on export
nonNew = self.col.db.list(
"select id from cards where id in %s and (queue != 0 or type != 0)" % sids
f"select id from cards where id in %s and (queue != {QUEUE_TYPE_NEW} or type != {CARD_TYPE_NEW})"
% sids
)
# reset all cards
self.col.db.execute(
"update cards set reps=0,lapses=0,odid=0,odue=0,queue=0"
f"update cards set reps=0,lapses=0,odid=0,odue=0,queue={QUEUE_TYPE_NEW}"
" where id in %s" % sids
)
# and forget any non-new cards, changing their due numbers
@ -1817,16 +1824,16 @@ usn=:usn,mod=:mod,factor=:fact where id=:id""",
# shift?
if shift:
low = self.col.db.scalar(
"select min(due) from cards where due >= ? and type = 0 "
f"select min(due) from cards where due >= ? and type = {CARD_TYPE_NEW} "
"and id not in %s" % scids,
start,
)
if low is not None:
shiftby = high - low + 1
self.col.db.execute(
"""
f"""
update cards set mod=?, usn=?, due=due+? where id not in %s
and due >= ? and queue = 0"""
and due >= ? and queue = {QUEUE_TYPE_NEW}"""
% scids,
now,
self.col.usn(),
@ -1836,7 +1843,7 @@ and due >= ? and queue = 0"""
# reorder cards
d = []
for id, nid in self.col.db.execute(
"select id, nid from cards where type = 0 and id in " + scids
f"select id, nid from cards where type = {CARD_TYPE_NEW} and id in " + scids
):
d.append(dict(now=now, due=due[nid], usn=self.col.usn(), cid=id))
self.col.db.executemany(
@ -1874,11 +1881,11 @@ and due >= ? and queue = 0"""
self.col.db.execute(
f"""
update cards set did = odid, queue = (case
when type = 1 then 0
when type = {CARD_TYPE_RELEARNING} then 2
when type = {CARD_TYPE_LRN} then {QUEUE_TYPE_NEW}
when type = {CARD_TYPE_RELEARNING} then {QUEUE_TYPE_REV}
else type end), type = (case
when type = 1 then 0
when type = {CARD_TYPE_RELEARNING} then 2
when type = {CARD_TYPE_LRN} then {CARD_TYPE_NEW}
when type = {CARD_TYPE_RELEARNING} then {CARD_TYPE_REV}
else type end),
due = odue, odue = 0, odid = 0, usn = ? where odid != 0""",
self.col.usn(),
@ -1890,8 +1897,8 @@ due = odue, odue = 0, odid = 0, usn = ? where odid != 0""",
self.col.db.execute(
f"""
update cards set
due = odue, queue = 2, type = 2, mod = %d, usn = %d, odue = 0
where queue in (1,{QUEUE_TYPE_DAY_LEARN_RELEARN}) and type in (2, {CARD_TYPE_RELEARNING})
due = odue, queue = {QUEUE_TYPE_REV}, type = {CARD_TYPE_REV}, mod = %d, usn = %d, odue = 0
where queue in ({QUEUE_TYPE_LRN},{QUEUE_TYPE_DAY_LEARN_RELEARN}) and type in ({CARD_TYPE_REV}, {CARD_TYPE_RELEARNING})
"""
% (intTime(), self.col.usn())
)
@ -1899,15 +1906,15 @@ due = odue, odue = 0, odid = 0, usn = ? where odid != 0""",
self.col.db.execute(
f"""
update cards set
due = %d+ivl, queue = 2, type = 2, mod = %d, usn = %d, odue = 0
where queue in (1,{QUEUE_TYPE_DAY_LEARN_RELEARN}) and type in (2, {CARD_TYPE_RELEARNING})
due = %d+ivl, queue = {QUEUE_TYPE_REV}, type = {CARD_TYPE_REV}, mod = %d, usn = %d, odue = 0
where queue in ({QUEUE_TYPE_LRN},{QUEUE_TYPE_DAY_LEARN_RELEARN}) and type in ({CARD_TYPE_REV}, {CARD_TYPE_RELEARNING})
"""
% (self.today, intTime(), self.col.usn())
)
# remove new cards from learning
self.forgetCards(
self.col.db.list(
f"select id from cards where queue in (1,{QUEUE_TYPE_DAY_LEARN_RELEARN})"
f"select id from cards where queue in ({QUEUE_TYPE_LRN},{QUEUE_TYPE_DAY_LEARN_RELEARN})"
)
)

@ -1916,13 +1923,13 @@ due = odue, odue = 0, odid = 0, usn = ? where odid != 0""",
self.col.db.execute(
f"""
update cards set type = (case
when type = 1 then 0
when type in (2, {CARD_TYPE_RELEARNING}) then 2
when type = {CARD_TYPE_LRN} then {CARD_TYPE_NEW}
when type in ({CARD_TYPE_REV}, {CARD_TYPE_RELEARNING}) then {CARD_TYPE_REV}
else type end),
due = (case when odue then odue else due end),
odue = 0,
mod = %d, usn = %d
where queue < 0"""
where queue < {QUEUE_TYPE_NEW}"""
% (intTime(), self.col.usn())
)

@ -1936,7 +1943,9 @@ where queue < 0"""
# adding 'hard' in v2 scheduler means old ease entries need shifting
# up or down
def _remapLearningAnswers(self, sql: str) -> None:
self.col.db.execute("update revlog set %s and type in (0,2)" % sql)
self.col.db.execute(
f"update revlog set %s and type in ({CARD_TYPE_NEW},{CARD_TYPE_REV})" % sql
)

def moveToV1(self) -> None:
self._emptyAllFiltered()

@ -6,15 +6,22 @@ import json
import time
from typing import Any, Dict, List, Optional, Tuple

import anki
from anki.consts import *
from anki.lang import _, ngettext
from anki.rsbackend import StringsGroup
from anki.utils import fmtTimeSpan, ids2str

# Card stats
##########################################################################

PERIOD_MONTH = 0
PERIOD_YEAR = 1
PERIOD_LIFE = 2


class CardStats:
def __init__(self, col, card) -> None:
def __init__(self, col: anki.storage._Collection, card: anki.cards.Card) -> None:
self.col = col
self.card = card
self.txt = ""
@ -30,18 +37,21 @@ class CardStats:
if first:
self.addLine(_("First Review"), self.date(first / 1000))
self.addLine(_("Latest Review"), self.date(last / 1000))
if c.type in (1, 2):
if c.odid or c.queue < 0:
if c.type in (CARD_TYPE_LRN, CARD_TYPE_REV):
if c.odid or c.queue < QUEUE_TYPE_NEW:
next = None
else:
if c.queue in (2, 3):
if c.queue in (QUEUE_TYPE_REV, QUEUE_TYPE_DAY_LEARN_RELEARN):
next = time.time() + ((c.due - self.col.sched.today) * 86400)
else:
next = c.due
next = self.date(next)
if next:
self.addLine(_("Due"), next)
if c.queue == 2:
self.addLine(
self.col.backend.translate(StringsGroup.STATISTICS, "due-date"),
next,
)
if c.queue == QUEUE_TYPE_REV:
self.addLine(_("Interval"), fmt(c.ivl * 86400))
self.addLine(_("Ease"), "%d%%" % (c.factor / 10.0))
self.addLine(_("Reviews"), "%d" % c.reps)
@ -52,7 +62,7 @@ class CardStats:
if cnt:
self.addLine(_("Average Time"), self.time(total / float(cnt)))
self.addLine(_("Total Time"), self.time(total))
elif c.queue == 0:
elif c.queue == QUEUE_TYPE_NEW:
self.addLine(_("Position"), c.due)
self.addLine(_("Card Type"), c.template()["name"])
self.addLine(_("Note Type"), c.model()["name"])
@ -102,14 +112,14 @@ class CollectionStats:
def __init__(self, col) -> None:
self.col = col
self._stats = None
self.type = 0
self.type = PERIOD_MONTH
self.width = 600
self.height = 200
self.wholeCollection = False

# assumes jquery & plot are available in document
def report(self, type=0) -> str:
# 0=days, 1=weeks, 2=months
def report(self, type=PERIOD_MONTH) -> str:
# 0=month, 1=year, 2=deck life
self.type = type
from .statsbg import bg

@ -149,13 +159,13 @@ body {background-image: url(data:image/png;base64,%s); }
if lim:
lim = " and " + lim
cards, thetime, failed, lrn, rev, relrn, filt = self.col.db.first(
"""
f"""
select count(), sum(time)/1000,
sum(case when ease = 1 then 1 else 0 end), /* failed */
sum(case when type = 0 then 1 else 0 end), /* learning */
sum(case when type = 1 then 1 else 0 end), /* review */
sum(case when type = 2 then 1 else 0 end), /* relearn */
sum(case when type = 3 then 1 else 0 end) /* filter */
sum(case when type = {REVLOG_LRN} then 1 else 0 end), /* learning */
sum(case when type = {REVLOG_REV} then 1 else 0 end), /* review */
sum(case when type = {REVLOG_RELRN} then 1 else 0 end), /* relearn */
sum(case when type = {REVLOG_CRAM} then 1 else 0 end) /* filter */
from revlog where id > ? """
+ lim,
(self.col.sched.dayCutoff - 86400) * 1000,
@ -215,9 +225,9 @@ from revlog where id > ? """

def get_start_end_chunk(self, by="review") -> Tuple[int, Optional[int], int]:
start = 0
if self.type == 0:
if self.type == PERIOD_MONTH:
end, chunk = 31, 1
elif self.type == 1:
elif self.type == PERIOD_YEAR:
end, chunk = 52, 7
else: # self.type == 2:
end = None
@ -279,8 +289,8 @@ from revlog where id > ? """
self._line(i, _("Total"), ngettext("%d review", "%d reviews", tot) % tot)
self._line(i, _("Average"), self._avgDay(tot, num, _("reviews")))
tomorrow = self.col.db.scalar(
"""
select count() from cards where did in %s and queue in (2,3)
f"""
select count() from cards where did in %s and queue in ({QUEUE_TYPE_REV},{QUEUE_TYPE_DAY_LEARN_RELEARN})
|
||||
and due = ?"""
|
||||
% self._limit(),
|
||||
self.col.sched.today + 1,
|
||||
@ -296,12 +306,12 @@ and due = ?"""
|
||||
if end is not None:
|
||||
lim += " and day < %d" % end
|
||||
return self.col.db.all(
|
||||
"""
|
||||
f"""
|
||||
select (due-:today)/:chunk as day,
|
||||
sum(case when ivl < 21 then 1 else 0 end), -- yng
|
||||
sum(case when ivl >= 21 then 1 else 0 end) -- mtr
|
||||
from cards
|
||||
where did in %s and queue in (2,3)
|
||||
where did in %s and queue in ({QUEUE_TYPE_REV},{QUEUE_TYPE_DAY_LEARN_RELEARN})
|
||||
%s
|
||||
group by day order by day"""
|
||||
% (self._limit(), lim),
|
||||
@ -396,7 +406,7 @@ group by day order by day"""
|
||||
(10, colCram, _("Cram")),
|
||||
),
|
||||
)
|
||||
if self.type == 0:
|
||||
if self.type == PERIOD_MONTH:
|
||||
t = _("Minutes")
|
||||
convHours = False
|
||||
else:
|
||||
@ -513,7 +523,7 @@ group by day order by day"""
|
||||
lim = "where " + " and ".join(lims)
|
||||
else:
|
||||
lim = ""
|
||||
if self.type == 0:
|
||||
if self.type == PERIOD_MONTH:
|
||||
tf = 60.0 # minutes
|
||||
else:
|
||||
tf = 3600.0 # hours
|
||||
@ -543,25 +553,25 @@ group by day order by day"""
|
||||
lim = "where " + " and ".join(lims)
|
||||
else:
|
||||
lim = ""
|
||||
if self.type == 0:
|
||||
if self.type == PERIOD_MONTH:
|
||||
tf = 60.0 # minutes
|
||||
else:
|
||||
tf = 3600.0 # hours
|
||||
return self.col.db.all(
|
||||
"""
|
||||
f"""
|
||||
select
|
||||
(cast((id/1000.0 - :cut) / 86400.0 as int))/:chunk as day,
|
||||
sum(case when type = 0 then 1 else 0 end), -- lrn count
|
||||
sum(case when type = 1 and lastIvl < 21 then 1 else 0 end), -- yng count
|
||||
sum(case when type = 1 and lastIvl >= 21 then 1 else 0 end), -- mtr count
|
||||
sum(case when type = 2 then 1 else 0 end), -- lapse count
|
||||
sum(case when type = 3 then 1 else 0 end), -- cram count
|
||||
sum(case when type = 0 then time/1000.0 else 0 end)/:tf, -- lrn time
|
||||
sum(case when type = {REVLOG_LRN} then 1 else 0 end), -- lrn count
|
||||
sum(case when type = {REVLOG_REV} and lastIvl < 21 then 1 else 0 end), -- yng count
|
||||
sum(case when type = {REVLOG_REV} and lastIvl >= 21 then 1 else 0 end), -- mtr count
|
||||
sum(case when type = {REVLOG_RELRN} then 1 else 0 end), -- lapse count
|
||||
sum(case when type = {REVLOG_CRAM} then 1 else 0 end), -- cram count
|
||||
sum(case when type = {REVLOG_LRN} then time/1000.0 else 0 end)/:tf, -- lrn time
|
||||
-- yng + mtr time
|
||||
sum(case when type = 1 and lastIvl < 21 then time/1000.0 else 0 end)/:tf,
|
||||
sum(case when type = 1 and lastIvl >= 21 then time/1000.0 else 0 end)/:tf,
|
||||
sum(case when type = 2 then time/1000.0 else 0 end)/:tf, -- lapse time
|
||||
sum(case when type = 3 then time/1000.0 else 0 end)/:tf -- cram time
|
||||
sum(case when type = {REVLOG_REV} and lastIvl < 21 then time/1000.0 else 0 end)/:tf,
|
||||
sum(case when type = {REVLOG_REV} and lastIvl >= 21 then time/1000.0 else 0 end)/:tf,
|
||||
sum(case when type = {REVLOG_RELRN} then time/1000.0 else 0 end)/:tf, -- lapse time
|
||||
sum(case when type = {REVLOG_CRAM} then time/1000.0 else 0 end)/:tf -- cram time
|
||||
from revlog %s
|
||||
group by day order by day"""
|
||||
% lim,
|
||||
@ -606,9 +616,9 @@ group by day order by day)"""
|
||||
for (grp, cnt) in ivls:
|
||||
tot += cnt
|
||||
totd.append((grp, tot / float(all) * 100))
|
||||
if self.type == 0:
|
||||
if self.type == PERIOD_MONTH:
|
||||
ivlmax = 31
|
||||
elif self.type == 1:
|
||||
elif self.type == PERIOD_YEAR:
|
||||
ivlmax = 52
|
||||
else:
|
||||
ivlmax = max(5, ivls[-1][0])
|
||||
@ -643,9 +653,9 @@ group by day order by day)"""
|
||||
lim = "and grp <= %d" % end if end else ""
|
||||
data = [
|
||||
self.col.db.all(
|
||||
"""
|
||||
f"""
|
||||
select ivl / :chunk as grp, count() from cards
|
||||
where did in %s and queue = 2 %s
|
||||
where did in %s and queue = {QUEUE_TYPE_REV} %s
|
||||
group by grp
|
||||
order by grp"""
|
||||
% (self._limit(), lim),
|
||||
@ -656,8 +666,8 @@ order by grp"""
|
||||
data
|
||||
+ list(
|
||||
self.col.db.first(
|
||||
"""
|
||||
select count(), avg(ivl), max(ivl) from cards where did in %s and queue = 2"""
|
||||
f"""
|
||||
select count(), avg(ivl), max(ivl) from cards where did in %s and queue = {QUEUE_TYPE_REV}"""
|
||||
% self._limit()
|
||||
)
|
||||
),
|
||||
@ -675,9 +685,9 @@ select count(), avg(ivl), max(ivl) from cards where did in %s and queue = 2"""
|
||||
types = ("lrn", "yng", "mtr")
|
||||
eases = self._eases()
|
||||
for (type, ease, cnt) in eases:
|
||||
if type == 1:
|
||||
if type == CARD_TYPE_LRN:
|
||||
ease += 5
|
||||
elif type == 2:
|
||||
elif type == CARD_TYPE_REV:
|
||||
ease += 10
|
||||
n = types[type]
|
||||
d[n].append((ease, cnt))
|
||||
@ -714,7 +724,7 @@ select count(), avg(ivl), max(ivl) from cards where did in %s and queue = 2"""
|
||||
return txt
|
||||
|
||||
def _easeInfo(self, eases) -> str:
|
||||
types = {0: [0, 0], 1: [0, 0], 2: [0, 0]}
|
||||
types = {PERIOD_MONTH: [0, 0], PERIOD_YEAR: [0, 0], PERIOD_LIFE: [0, 0]}
|
||||
for (type, ease, cnt) in eases:
|
||||
if ease == 1:
|
||||
types[type][0] += cnt
|
||||
@ -759,12 +769,12 @@ select count(), avg(ivl), max(ivl) from cards where did in %s and queue = 2"""
|
||||
else:
|
||||
ease4repl = "ease"
|
||||
return self.col.db.all(
|
||||
"""
|
||||
f"""
|
||||
select (case
|
||||
when type in (0,2) then 0
|
||||
when type in ({REVLOG_LRN},{REVLOG_RELRN}) then 0
|
||||
when lastIvl < 21 then 1
|
||||
else 2 end) as thetype,
|
||||
(case when type in (0,2) and ease = 4 then %s else ease end), count() from revlog %s
|
||||
(case when type in ({REVLOG_LRN},{REVLOG_RELRN}) and ease = 4 then %s else ease end), count() from revlog %s
|
||||
group by thetype, ease
|
||||
order by thetype, ease"""
|
||||
% (ease4repl, lim)
|
||||
@ -853,13 +863,13 @@ order by thetype, ease"""
|
||||
if pd:
|
||||
lim += " and id > %d" % ((self.col.sched.dayCutoff - (86400 * pd)) * 1000)
|
||||
return self.col.db.all(
|
||||
"""
|
||||
f"""
|
||||
select
|
||||
23 - ((cast((:cut - id/1000) / 3600.0 as int)) %% 24) as hour,
|
||||
sum(case when ease = 1 then 0 else 1 end) /
|
||||
cast(count() as float) * 100,
|
||||
count()
|
||||
from revlog where type in (0,1,2) %s
|
||||
from revlog where type in ({REVLOG_LRN},{REVLOG_REV},{REVLOG_RELRN}) %s
|
||||
group by hour having count() > 30 order by hour"""
|
||||
% lim,
|
||||
cut=self.col.sched.dayCutoff - (rolloverHour * 3600),
|
||||
@ -929,23 +939,23 @@ when you answer "good" on a review."""
|
||||
|
||||
def _factors(self) -> Any:
|
||||
return self.col.db.first(
|
||||
"""
|
||||
f"""
|
||||
select
|
||||
min(factor) / 10.0,
|
||||
avg(factor) / 10.0,
|
||||
max(factor) / 10.0
|
||||
from cards where did in %s and queue = 2"""
|
||||
from cards where did in %s and queue = {QUEUE_TYPE_REV}"""
|
||||
% self._limit()
|
||||
)
|
||||
|
||||
def _cards(self) -> Any:
|
||||
return self.col.db.first(
|
||||
"""
|
||||
f"""
|
||||
select
|
||||
sum(case when queue=2 and ivl >= 21 then 1 else 0 end), -- mtr
|
||||
sum(case when queue in (1,3) or (queue=2 and ivl < 21) then 1 else 0 end), -- yng/lrn
|
||||
sum(case when queue=0 then 1 else 0 end), -- new
|
||||
sum(case when queue<0 then 1 else 0 end) -- susp
|
||||
sum(case when queue={QUEUE_TYPE_REV} and ivl >= 21 then 1 else 0 end), -- mtr
|
||||
sum(case when queue in ({QUEUE_TYPE_LRN},{QUEUE_TYPE_DAY_LEARN_RELEARN}) or (queue={QUEUE_TYPE_REV} and ivl < 21) then 1 else 0 end), -- yng/lrn
|
||||
sum(case when queue={QUEUE_TYPE_NEW} then 1 else 0 end), -- new
|
||||
sum(case when queue<{QUEUE_TYPE_NEW} then 1 else 0 end) -- susp
|
||||
from cards where did in %s"""
|
||||
% self._limit()
|
||||
)
|
||||
|
@ -11,6 +11,7 @@ from anki.collection import _Collection
from anki.consts import *
from anki.db import DB
from anki.lang import _
from anki.media import media_paths_from_col_path
from anki.rsbackend import RustBackend
from anki.stdmodels import (
addBasicModel,
@ -30,8 +31,9 @@ def Collection(
path: str, lock: bool = True, server: Optional[ServerData] = None, log: bool = False
) -> _Collection:
"Open a new or existing collection. Path must be unicode."
backend = RustBackend(path)
assert path.endswith(".anki2")
(media_dir, media_db) = media_paths_from_col_path(path)
backend = RustBackend(path, media_dir, media_db)
path = os.path.abspath(path)
create = not os.path.exists(path)
if create:
@ -111,7 +113,7 @@ def _upgrade(col, ver) -> None:
if ver < 3:
# new deck properties
for d in col.decks.all():
d["dyn"] = 0
d["dyn"] = DECK_STD
d["collapsed"] = False
col.decks.save(d)
if ver < 4:
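
For context, Collection() now derives the media folder and media DB locations before constructing the backend. A rough sketch of what that helper returns (the exact filenames are an assumption based on Anki's usual layout, not taken from this diff):

from anki.media import media_paths_from_col_path

# Assumed layout: the media folder and its DB sit beside the .anki2 file.
media_dir, media_db = media_paths_from_col_path("/home/user/Anki/User 1/collection.anki2")
# media_dir -> ".../collection.media"
# media_db  -> ".../collection.media.db2"  (filename assumed)
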
@ -13,12 +13,11 @@ from typing import Any, Dict, List, Optional, Tuple, Union

import anki
from anki.consts import *
from anki.db import DB, DBError
from anki.db import DB
from anki.utils import checksum, devMode, ids2str, intTime, platDesc, versionWithBuild

from . import hooks
from .httpclient import HttpClient
from .lang import ngettext

# add-on compat
AnkiRequestsClient = HttpClient
@ -679,207 +678,3 @@ class FullSyncer(HttpSyncer):
if self.req("upload", open(self.col.path, "rb")) != b"OK":
return False
return True


# Media syncing
##########################################################################
#
# About conflicts:
# - to minimize data loss, if both sides are marked for sending and one
# side has been deleted, favour the add
# - if added/changed on both sides, favour the server version on the
# assumption other syncers are in sync with the server
#

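The rules in the comment above are applied inline in MediaSyncer.sync() below; restated as a small standalone function (the Action enum is purely illustrative, not part of the codebase), the per-file decision looks roughly like this:

import enum
from typing import Optional


class Action(enum.Enum):
    # illustrative only; MediaSyncer makes these decisions inline
    FETCH = 1
    DELETE_LOCAL = 2
    SEND_LOCAL = 3
    MARK_CLEAN = 4
    NOTHING = 5


def resolve(lsum: Optional[str], rsum: Optional[str], ldirty: bool) -> Action:
    "Condensed restatement of the conflict rules documented above."
    if rsum:
        # added/changed remotely
        if not lsum or lsum != rsum:
            return Action.FETCH
        # same content on both sides already
        return Action.MARK_CLEAN if ldirty else Action.NOTHING
    if lsum:
        # deleted remotely; a dirty local copy means it was (re)added locally,
        # so favour the add over the remote delete and send it
        return Action.SEND_LOCAL if ldirty else Action.DELETE_LOCAL
    # deleted on both sides; just clear the dirty flag if needed
    return Action.MARK_CLEAN if ldirty else Action.NOTHING
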
class MediaSyncer:
|
||||
def __init__(self, col, server=None) -> None:
|
||||
self.col = col
|
||||
self.server = server
|
||||
self.downloadCount = 0
|
||||
|
||||
def sync(self) -> Any:
|
||||
# check if there have been any changes
|
||||
hooks.sync_stage_did_change("findMedia")
|
||||
self.col.log("findChanges")
|
||||
try:
|
||||
self.col.media.findChanges()
|
||||
except DBError:
|
||||
return "corruptMediaDB"
|
||||
|
||||
# begin session and check if in sync
|
||||
lastUsn = self.col.media.lastUsn()
|
||||
ret = self.server.begin()
|
||||
srvUsn = ret["usn"]
|
||||
if lastUsn == srvUsn and not self.col.media.haveDirty():
|
||||
return "noChanges"
|
||||
|
||||
# loop through and process changes from server
|
||||
self.col.log("last local usn is %s" % lastUsn)
|
||||
while True:
|
||||
data = self.server.mediaChanges(lastUsn=lastUsn)
|
||||
|
||||
self.col.log("mediaChanges resp count %d" % len(data))
|
||||
if not data:
|
||||
break
|
||||
|
||||
need = []
|
||||
lastUsn = data[-1][1]
|
||||
for fname, rusn, rsum in data:
|
||||
lsum, ldirty = self.col.media.syncInfo(fname)
|
||||
self.col.log(
|
||||
"check: lsum=%s rsum=%s ldirty=%d rusn=%d fname=%s"
|
||||
% ((lsum and lsum[0:4]), (rsum and rsum[0:4]), ldirty, rusn, fname)
|
||||
)
|
||||
|
||||
if rsum:
|
||||
# added/changed remotely
|
||||
if not lsum or lsum != rsum:
|
||||
self.col.log("will fetch")
|
||||
need.append(fname)
|
||||
else:
|
||||
self.col.log("have same already")
|
||||
if ldirty:
|
||||
self.col.media.markClean([fname])
|
||||
elif lsum:
|
||||
# deleted remotely
|
||||
if not ldirty:
|
||||
self.col.log("delete local")
|
||||
self.col.media.syncDelete(fname)
|
||||
else:
|
||||
# conflict; local add overrides remote delete
|
||||
self.col.log("conflict; will send")
|
||||
else:
|
||||
# deleted both sides
|
||||
self.col.log("both sides deleted")
|
||||
if ldirty:
|
||||
self.col.media.markClean([fname])
|
||||
|
||||
self._downloadFiles(need)
|
||||
|
||||
self.col.log("update last usn to %d" % lastUsn)
|
||||
self.col.media.setLastUsn(lastUsn) # commits
|
||||
|
||||
# at this point we're all up to date with the server's changes,
|
||||
# and we need to send our own
|
||||
|
||||
updateConflict = False
|
||||
toSend = self.col.media.dirtyCount()
|
||||
while True:
|
||||
zip, fnames = self.col.media.mediaChangesZip()
|
||||
if not fnames:
|
||||
break
|
||||
|
||||
hooks.sync_progress_did_change(
|
||||
ngettext(
|
||||
"%d media change to upload", "%d media changes to upload", toSend
|
||||
)
|
||||
% toSend,
|
||||
)
|
||||
|
||||
processedCnt, serverLastUsn = self.server.uploadChanges(zip)
|
||||
self.col.media.markClean(fnames[0:processedCnt])
|
||||
|
||||
self.col.log(
|
||||
"processed %d, serverUsn %d, clientUsn %d"
|
||||
% (processedCnt, serverLastUsn, lastUsn)
|
||||
)
|
||||
|
||||
if serverLastUsn - processedCnt == lastUsn:
|
||||
self.col.log("lastUsn in sync, updating local")
|
||||
lastUsn = serverLastUsn
|
||||
self.col.media.setLastUsn(serverLastUsn) # commits
|
||||
else:
|
||||
self.col.log("concurrent update, skipping usn update")
|
||||
# commit for markClean
|
||||
self.col.media.db.commit()
|
||||
updateConflict = True
|
||||
|
||||
toSend -= processedCnt
|
||||
|
||||
if updateConflict:
|
||||
self.col.log("restart sync due to concurrent update")
|
||||
return self.sync()
|
||||
|
||||
lcnt = self.col.media.mediaCount()
|
||||
ret = self.server.mediaSanity(local=lcnt)
|
||||
if ret == "OK":
|
||||
return "OK"
|
||||
else:
|
||||
self.col.media.forceResync()
|
||||
return ret
|
||||
|
||||
def _downloadFiles(self, fnames) -> None:
|
||||
self.col.log("%d files to fetch" % len(fnames))
|
||||
while fnames:
|
||||
top = fnames[0:SYNC_ZIP_COUNT]
|
||||
self.col.log("fetch %s" % top)
|
||||
zipData = self.server.downloadFiles(files=top)
|
||||
cnt = self.col.media.addFilesFromZip(zipData)
|
||||
self.downloadCount += cnt
|
||||
self.col.log("received %d files" % cnt)
|
||||
fnames = fnames[cnt:]
|
||||
|
||||
n = self.downloadCount
|
||||
hooks.sync_progress_did_change(
|
||||
ngettext("%d media file downloaded", "%d media files downloaded", n)
|
||||
% n,
|
||||
)
|
||||
|
||||
|
||||
# Remote media syncing
|
||||
##########################################################################
|
||||
|
||||
|
||||
class RemoteMediaServer(HttpSyncer):
|
||||
def __init__(self, col, hkey, client, hostNum) -> None:
|
||||
self.col = col
|
||||
HttpSyncer.__init__(self, hkey, client, hostNum=hostNum)
|
||||
self.prefix = "msync/"
|
||||
|
||||
def begin(self) -> Any:
|
||||
self.postVars = dict(
|
||||
k=self.hkey, v="ankidesktop,%s,%s" % (anki.version, platDesc())
|
||||
)
|
||||
ret = self._dataOnly(
|
||||
self.req("begin", io.BytesIO(json.dumps(dict()).encode("utf8")))
|
||||
)
|
||||
self.skey = ret["sk"]
|
||||
return ret
|
||||
|
||||
# args: lastUsn
|
||||
def mediaChanges(self, **kw) -> Any:
|
||||
self.postVars = dict(sk=self.skey,)
|
||||
return self._dataOnly(
|
||||
self.req("mediaChanges", io.BytesIO(json.dumps(kw).encode("utf8")))
|
||||
)
|
||||
|
||||
# args: files
|
||||
def downloadFiles(self, **kw) -> Any:
|
||||
return self.req("downloadFiles", io.BytesIO(json.dumps(kw).encode("utf8")))
|
||||
|
||||
def uploadChanges(self, zip) -> Any:
|
||||
# no compression, as we compress the zip file instead
|
||||
return self._dataOnly(self.req("uploadChanges", io.BytesIO(zip), comp=0))
|
||||
|
||||
# args: local
|
||||
def mediaSanity(self, **kw) -> Any:
|
||||
return self._dataOnly(
|
||||
self.req("mediaSanity", io.BytesIO(json.dumps(kw).encode("utf8")))
|
||||
)
|
||||
|
||||
def _dataOnly(self, resp) -> Any:
|
||||
resp = json.loads(resp.decode("utf8"))
|
||||
if resp["err"]:
|
||||
self.col.log("error returned:%s" % resp["err"])
|
||||
raise Exception("SyncError:%s" % resp["err"])
|
||||
return resp["data"]
|
||||
|
||||
# only for unit tests
|
||||
def mediatest(self, cmd) -> Any:
|
||||
self.postVars = dict(k=self.hkey,)
|
||||
return self._dataOnly(
|
||||
self.req(
|
||||
"newMediaTest", io.BytesIO(json.dumps(dict(cmd=cmd)).encode("utf8"))
|
||||
)
|
||||
)
|
||||
|
@ -34,7 +34,6 @@ from typing import Any, Dict, List, Optional, Tuple
import anki
from anki import hooks
from anki.cards import Card
from anki.lang import _
from anki.models import NoteType
from anki.notes import Note
from anki.rsbackend import TemplateReplacementList
@ -120,31 +119,14 @@ def render_card(
# render
try:
output = render_card_from_context(ctx)
except anki.rsbackend.BackendException as e:
# fixme: specific exception in 2.1.21
err = e.args[0].template_parse # pylint: disable=no-member
if err.q_side:
side = _("Front")
else:
side = _("Back")
errmsg = _("{} template has a problem:").format(side) + f"<br>{e}"
errmsg += "<br><a href=https://anki.tenderapp.com/kb/problems/card-template-has-a-problem>{}</a>".format(
_("More info")
)
except anki.rsbackend.TemplateError as e:
output = TemplateRenderOutput(
question_text=errmsg,
answer_text=errmsg,
question_text=str(e),
answer_text=str(e),
question_av_tags=[],
answer_av_tags=[],
)

if not output.question_text.strip():
msg = _("The front of this card is blank.")
help = _("More info")
helplink = CARD_BLANK_HELP
msg += f"<br><a href='{helplink}'>{help}</a>"
output.question_text = msg

hooks.card_did_render(output, ctx)

return output
@ -168,7 +150,7 @@ def fields_for_rendering(col: anki.storage._Collection, card: Card, note: Note):
# add special fields
fields["Tags"] = note.stringTags().strip()
fields["Type"] = card.note_type()["name"]
fields["Deck"] = col.decks.name(card.did)
fields["Deck"] = col.decks.name(card.odid or card.did)
fields["Subdeck"] = fields["Deck"].split("::")[-1]
fields["Card"] = card.template()["name"] # type: ignore
flag = card.userFlag()
16
pylib/anki/types.py
Normal file
@ -0,0 +1,16 @@
import enum
from typing import Any, NoReturn


class _Impossible(enum.Enum):
    pass


def assert_impossible(arg: NoReturn) -> NoReturn:
    raise Exception(f"unexpected arg received: {type(arg)} {arg}")


# mypy is not yet smart enough to do exhaustiveness checking on literal types,
# so this will fail at runtime instead of typecheck time :-(
def assert_impossible_literal(arg: Any) -> NoReturn:
    raise Exception(f"unexpected arg received: {type(arg)} {arg}")
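
The new helper is aimed at exhaustiveness checks. A minimal usage sketch (the Mode enum below is made up for illustration and is not part of this commit), showing how assert_impossible turns a forgotten case into a mypy error rather than a silent fall-through:

import enum

from anki.types import assert_impossible


class Mode(enum.Enum):
    # hypothetical enum, for illustration only
    ADD = 1
    REVIEW = 2


def describe(mode: Mode) -> str:
    if mode is Mode.ADD:
        return "adding"
    elif mode is Mode.REVIEW:
        return "reviewing"
    else:
        # if a new Mode member is added without a branch above, mypy flags
        # this call, because `mode` can no longer be narrowed away
        return assert_impossible(mode)
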
@ -4,6 +4,7 @@ import os
import tempfile

from anki import Collection as aopen
from anki.rsbackend import StringsGroup
from anki.stdmodels import addBasicModel, models
from anki.utils import isWin
from tests.shared import assertException, getEmptyCol
@ -147,3 +148,17 @@ def test_furigana():
m["tmpls"][0]["qfmt"] = "{{kana:}}"
mm.save(m)
c.q(reload=True)


def test_translate():
d = getEmptyCol()
tr = d.backend.translate

# strip off unicode separators
def no_uni(s: str) -> str:
return s.replace("\u2068", "").replace("\u2069", "")

assert tr(StringsGroup.TEST, "valid-key") == "a valid key"
assert "invalid-key" in tr(StringsGroup.TEST, "invalid-key")
assert no_uni(tr(StringsGroup.TEST, "plural", hats=1)) == "You have 1 hat."
assert no_uni(tr(StringsGroup.TEST, "plural", hats=2)) == "You have 2 hats."
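
Outside the test suite the same backend call is used for user-visible strings; a hedged sketch of the calling pattern, reusing the test fixture's "plural" key (col is assumed to be an open collection):

from anki.rsbackend import StringsGroup

# Fluent-style named arguments are passed straight through to the backend.
msg = col.backend.translate(StringsGroup.TEST, "plural", hats=3)
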
@ -1,6 +1,7 @@
# coding: utf-8
import pytest

from anki.consts import *
from anki.find import Finder
from tests.shared import getEmptyCol

@ -91,13 +92,13 @@ def test_findCards():
assert len(deck.findCards('"goats are"')) == 1
# card states
c = f.cards()[0]
c.queue = c.type = 2
c.queue = c.type = CARD_TYPE_REV
assert deck.findCards("is:review") == []
c.flush()
assert deck.findCards("is:review") == [c.id]
assert deck.findCards("is:due") == []
c.due = 0
c.queue = 2
c.queue = QUEUE_TYPE_REV
c.flush()
assert deck.findCards("is:due") == [c.id]
assert len(deck.findCards("-is:due")) == 4
@ -3,7 +3,6 @@
import os
import shutil

from anki.utils import stripHTML
from tests.shared import getEmptyCol


@ -30,7 +29,7 @@ def test_latex():
# fix path
anki.latex.pngCommands[0][0] = "latex"
# check media db should cause latex to be generated
d.media.check()
d.media.render_all_latex()
assert len(os.listdir(d.media.dir())) == 1
assert ".png" in f.cards()[0].q()
# adding new notes should cause generation on question display
@ -47,13 +46,12 @@ def test_latex():
oldcard = f.cards()[0]
assert ".png" in oldcard.q()
# if we turn off building, then previous cards should work, but cards with
# missing media will show the latex
# missing media will show a broken image
anki.latex.build = False
f = d.newNote()
f["Front"] = "[latex]foo[/latex]"
d.addNote(f)
assert len(os.listdir(d.media.dir())) == 2
assert stripHTML(f.cards()[0].q()) == "[latex]foo[/latex]"
assert ".png" in oldcard.q()
# turn it on again so other test don't suffer
anki.latex.build = True
@ -17,10 +17,10 @@ def test_add():
assert d.media.addFile(path) == "foo.jpg"
# adding the same file again should not create a duplicate
assert d.media.addFile(path) == "foo.jpg"
# but if it has a different md5, it should
# but if it has a different sha1, it should
with open(path, "w") as f:
f.write("world")
assert d.media.addFile(path) == "foo (1).jpg"
assert d.media.addFile(path) == "foo-7c211433f02071597741e6ff5a8ea34789abbf43.jpg"


def test_strings():
@ -73,65 +73,8 @@ def test_deckIntegration():
|
||||
with open(os.path.join(d.media.dir(), "foo.jpg"), "w") as f:
|
||||
f.write("test")
|
||||
# check media
|
||||
d.close()
|
||||
ret = d.media.check()
|
||||
assert ret[0] == ["fake2.png"]
|
||||
assert ret[1] == ["foo.jpg"]
|
||||
|
||||
|
||||
def test_changes():
|
||||
d = getEmptyCol()
|
||||
|
||||
def added():
|
||||
return d.media.db.execute("select fname from media where csum is not null")
|
||||
|
||||
def removed():
|
||||
return d.media.db.execute("select fname from media where csum is null")
|
||||
|
||||
def advanceTime():
|
||||
d.media.db.execute("update media set mtime=mtime-1")
|
||||
d.media.db.execute("update meta set dirMod = dirMod - 1")
|
||||
|
||||
assert not list(added())
|
||||
assert not list(removed())
|
||||
# add a file
|
||||
dir = tempfile.mkdtemp(prefix="anki")
|
||||
path = os.path.join(dir, "foo.jpg")
|
||||
with open(path, "w") as f:
|
||||
f.write("hello")
|
||||
path = d.media.addFile(path)
|
||||
# should have been logged
|
||||
d.media.findChanges()
|
||||
assert list(added())
|
||||
assert not list(removed())
|
||||
# if we modify it, the cache won't notice
|
||||
advanceTime()
|
||||
with open(path, "w") as f:
|
||||
f.write("world")
|
||||
assert len(list(added())) == 1
|
||||
assert not list(removed())
|
||||
# but if we add another file, it will
|
||||
advanceTime()
|
||||
with open(path + "2", "w") as f:
|
||||
f.write("yo")
|
||||
d.media.findChanges()
|
||||
assert len(list(added())) == 2
|
||||
assert not list(removed())
|
||||
# deletions should get noticed too
|
||||
advanceTime()
|
||||
os.unlink(path + "2")
|
||||
d.media.findChanges()
|
||||
assert len(list(added())) == 1
|
||||
assert len(list(removed())) == 1
|
||||
|
||||
|
||||
def test_illegal():
|
||||
d = getEmptyCol()
|
||||
aString = "a:b|cd\\e/f\0g*h"
|
||||
good = "abcdefgh"
|
||||
assert d.media.stripIllegal(aString) == good
|
||||
for c in aString:
|
||||
bad = d.media.hasIllegal("somestring" + c + "morestring")
|
||||
if bad:
|
||||
assert c not in good
|
||||
else:
|
||||
assert c in good
|
||||
d.reopen()
|
||||
assert ret.missing == ["fake2.png"]
|
||||
assert ret.unused == ["foo.jpg"]
|
||||
|
@ -223,7 +223,7 @@ def test_typecloze():
d = getEmptyCol()
m = d.models.byName("Cloze")
d.models.setCurrent(m)
m["tmpls"][0]["qfmt"] = "{{type:cloze:Text}}"
m["tmpls"][0]["qfmt"] = "{{cloze:Text}}{{type:cloze:Text}}"
d.models.save(m)
f = d.newNote()
f["Text"] = "hello {{c1::world}}"
@ -4,7 +4,7 @@ import copy
|
||||
import time
|
||||
|
||||
from anki import hooks
|
||||
from anki.consts import STARTING_FACTOR
|
||||
from anki.consts import *
|
||||
from anki.utils import intTime
|
||||
from tests.shared import getEmptyCol as getEmptyColOrig
|
||||
|
||||
@ -46,13 +46,13 @@ def test_new():
|
||||
# fetch it
|
||||
c = d.sched.getCard()
|
||||
assert c
|
||||
assert c.queue == 0
|
||||
assert c.type == 0
|
||||
assert c.queue == QUEUE_TYPE_NEW
|
||||
assert c.type == CARD_TYPE_NEW
|
||||
# if we answer it, it should become a learn card
|
||||
t = intTime()
|
||||
d.sched.answerCard(c, 1)
|
||||
assert c.queue == 1
|
||||
assert c.type == 1
|
||||
assert c.queue == QUEUE_TYPE_LRN
|
||||
assert c.type == CARD_TYPE_LRN
|
||||
assert c.due >= t
|
||||
|
||||
# disabled for now, as the learn fudging makes this randomly fail
|
||||
@ -163,11 +163,11 @@ def test_learn():
|
||||
assert c.left % 1000 == 1
|
||||
assert c.left // 1000 == 1
|
||||
# the next pass should graduate the card
|
||||
assert c.queue == 1
|
||||
assert c.type == 1
|
||||
assert c.queue == QUEUE_TYPE_LRN
|
||||
assert c.type == CARD_TYPE_LRN
|
||||
d.sched.answerCard(c, 2)
|
||||
assert c.queue == 2
|
||||
assert c.type == 2
|
||||
assert c.queue == QUEUE_TYPE_REV
|
||||
assert c.type == CARD_TYPE_REV
|
||||
# should be due tomorrow, with an interval of 1
|
||||
assert c.due == d.sched.today + 1
|
||||
assert c.ivl == 1
|
||||
@ -175,27 +175,27 @@ def test_learn():
|
||||
c.type = 0
|
||||
c.queue = 1
|
||||
d.sched.answerCard(c, 3)
|
||||
assert c.type == 2
|
||||
assert c.queue == 2
|
||||
assert c.type == CARD_TYPE_REV
|
||||
assert c.queue == QUEUE_TYPE_REV
|
||||
assert checkRevIvl(d, c, 4)
|
||||
# revlog should have been updated each time
|
||||
assert d.db.scalar("select count() from revlog where type = 0") == 5
|
||||
# now failed card handling
|
||||
c.type = 2
|
||||
c.type = CARD_TYPE_REV
|
||||
c.queue = 1
|
||||
c.odue = 123
|
||||
d.sched.answerCard(c, 3)
|
||||
assert c.due == 123
|
||||
assert c.type == 2
|
||||
assert c.queue == 2
|
||||
assert c.type == CARD_TYPE_REV
|
||||
assert c.queue == QUEUE_TYPE_REV
|
||||
# we should be able to remove manually, too
|
||||
c.type = 2
|
||||
c.type = CARD_TYPE_REV
|
||||
c.queue = 1
|
||||
c.odue = 321
|
||||
c.flush()
|
||||
d.sched.removeLrn()
|
||||
c.load()
|
||||
assert c.queue == 2
|
||||
assert c.queue == QUEUE_TYPE_REV
|
||||
assert c.due == 321
|
||||
|
||||
|
||||
@ -247,7 +247,7 @@ def test_learn_day():
|
||||
# answering it will place it in queue 3
|
||||
d.sched.answerCard(c, 2)
|
||||
assert c.due == d.sched.today + 1
|
||||
assert c.queue == 3
|
||||
assert c.queue == CARD_TYPE_RELEARNING
|
||||
assert not d.sched.getCard()
|
||||
# for testing, move it back a day
|
||||
c.due -= 1
|
||||
@ -259,7 +259,7 @@ def test_learn_day():
|
||||
assert ni(c, 2) == 86400 * 2
|
||||
# if we fail it, it should be back in the correct queue
|
||||
d.sched.answerCard(c, 1)
|
||||
assert c.queue == 1
|
||||
assert c.queue == QUEUE_TYPE_LRN
|
||||
d.undo()
|
||||
d.reset()
|
||||
c = d.sched.getCard()
|
||||
@ -271,7 +271,7 @@ def test_learn_day():
|
||||
# the last pass should graduate it into a review card
|
||||
assert ni(c, 2) == 86400
|
||||
d.sched.answerCard(c, 2)
|
||||
assert c.queue == c.type == 2
|
||||
assert c.queue == CARD_TYPE_REV and c.type == QUEUE_TYPE_REV
|
||||
# if the lapse step is tomorrow, failing it should handle the counts
|
||||
# correctly
|
||||
c.due = 0
|
||||
@ -281,7 +281,7 @@ def test_learn_day():
|
||||
d.sched._cardConf(c)["lapse"]["delays"] = [1440]
|
||||
c = d.sched.getCard()
|
||||
d.sched.answerCard(c, 1)
|
||||
assert c.queue == 3
|
||||
assert c.queue == CARD_TYPE_RELEARNING
|
||||
assert d.sched.counts() == (0, 0, 0)
|
||||
|
||||
|
||||
@ -294,8 +294,8 @@ def test_reviews():
|
||||
d.addNote(f)
|
||||
# set the card up as a review card, due 8 days ago
|
||||
c = f.cards()[0]
|
||||
c.type = 2
|
||||
c.queue = 2
|
||||
c.type = CARD_TYPE_REV
|
||||
c.queue = QUEUE_TYPE_REV
|
||||
c.due = d.sched.today - 8
|
||||
c.factor = STARTING_FACTOR
|
||||
c.reps = 3
|
||||
@ -311,7 +311,7 @@ def test_reviews():
|
||||
d.reset()
|
||||
d.sched._cardConf(c)["lapse"]["delays"] = [2, 20]
|
||||
d.sched.answerCard(c, 1)
|
||||
assert c.queue == 1
|
||||
assert c.queue == QUEUE_TYPE_LRN
|
||||
# it should be due tomorrow, with an interval of 1
|
||||
assert c.odue == d.sched.today + 1
|
||||
assert c.ivl == 1
|
||||
@ -333,7 +333,7 @@ def test_reviews():
|
||||
c = copy.copy(cardcopy)
|
||||
c.flush()
|
||||
d.sched.answerCard(c, 2)
|
||||
assert c.queue == 2
|
||||
assert c.queue == QUEUE_TYPE_REV
|
||||
# the new interval should be (100 + 8/4) * 1.2 = 122
|
||||
assert checkRevIvl(d, c, 122)
|
||||
assert c.due == d.sched.today + c.ivl
|
||||
@ -376,9 +376,9 @@ def test_reviews():
|
||||
hooks.card_did_leech.append(onLeech)
|
||||
d.sched.answerCard(c, 1)
|
||||
assert hooked
|
||||
assert c.queue == -1
|
||||
assert c.queue == QUEUE_TYPE_SUSPENDED
|
||||
c.load()
|
||||
assert c.queue == -1
|
||||
assert c.queue == QUEUE_TYPE_SUSPENDED
|
||||
|
||||
|
||||
def test_button_spacing():
|
||||
@ -388,8 +388,8 @@ def test_button_spacing():
|
||||
d.addNote(f)
|
||||
# 1 day ivl review card due now
|
||||
c = f.cards()[0]
|
||||
c.type = 2
|
||||
c.queue = 2
|
||||
c.type = CARD_TYPE_REV
|
||||
c.queue = QUEUE_TYPE_REV
|
||||
c.due = d.sched.today
|
||||
c.reps = 1
|
||||
c.ivl = 1
|
||||
@ -412,7 +412,7 @@ def test_overdue_lapse():
|
||||
d.addNote(f)
|
||||
# simulate a review that was lapsed and is now due for its normal review
|
||||
c = f.cards()[0]
|
||||
c.type = 2
|
||||
c.type = CARD_TYPE_REV
|
||||
c.queue = 1
|
||||
c.due = -1
|
||||
c.odue = -1
|
||||
@ -492,7 +492,7 @@ def test_nextIvl():
|
||||
assert ni(c, 3) == 4 * 86400
|
||||
# lapsed cards
|
||||
##################################################
|
||||
c.type = 2
|
||||
c.type = CARD_TYPE_REV
|
||||
c.ivl = 100
|
||||
c.factor = STARTING_FACTOR
|
||||
assert ni(c, 1) == 60
|
||||
@ -500,7 +500,7 @@ def test_nextIvl():
|
||||
assert ni(c, 3) == 100 * 86400
|
||||
# review cards
|
||||
##################################################
|
||||
c.queue = 2
|
||||
c.queue = QUEUE_TYPE_REV
|
||||
c.ivl = 100
|
||||
c.factor = STARTING_FACTOR
|
||||
# failing it should put it at 60s
|
||||
@ -551,20 +551,20 @@ def test_suspend():
|
||||
# should cope with rev cards being relearnt
|
||||
c.due = 0
|
||||
c.ivl = 100
|
||||
c.type = 2
|
||||
c.queue = 2
|
||||
c.type = CARD_TYPE_REV
|
||||
c.queue = QUEUE_TYPE_REV
|
||||
c.flush()
|
||||
d.reset()
|
||||
c = d.sched.getCard()
|
||||
d.sched.answerCard(c, 1)
|
||||
assert c.due >= time.time()
|
||||
assert c.queue == 1
|
||||
assert c.type == 2
|
||||
assert c.queue == QUEUE_TYPE_LRN
|
||||
assert c.type == CARD_TYPE_REV
|
||||
d.sched.suspendCards([c.id])
|
||||
d.sched.unsuspendCards([c.id])
|
||||
c.load()
|
||||
assert c.queue == 2
|
||||
assert c.type == 2
|
||||
assert c.queue == QUEUE_TYPE_REV
|
||||
assert c.type == CARD_TYPE_REV
|
||||
assert c.due == 1
|
||||
# should cope with cards in cram decks
|
||||
c.due = 1
|
||||
@ -587,7 +587,8 @@ def test_cram():
|
||||
d.addNote(f)
|
||||
c = f.cards()[0]
|
||||
c.ivl = 100
|
||||
c.type = c.queue = 2
|
||||
c.queue = CARD_TYPE_REV
|
||||
c.type = QUEUE_TYPE_REV
|
||||
# due in 25 days, so it's been waiting 75 days
|
||||
c.due = d.sched.today + 25
|
||||
c.mod = 1
|
||||
@ -622,7 +623,7 @@ def test_cram():
|
||||
# int(75*1.85) = 138
|
||||
assert c.ivl == 138
|
||||
assert c.odue == 138
|
||||
assert c.queue == 1
|
||||
assert c.queue == QUEUE_TYPE_LRN
|
||||
# should be logged as a cram rep
|
||||
assert d.db.scalar("select type from revlog order by id desc limit 1") == 3
|
||||
# check ivls again
|
||||
@ -634,7 +635,7 @@ def test_cram():
|
||||
d.sched.answerCard(c, 2)
|
||||
assert c.ivl == 138
|
||||
assert c.due == 138
|
||||
assert c.queue == 2
|
||||
assert c.queue == QUEUE_TYPE_REV
|
||||
# and it will have moved back to the previous deck
|
||||
assert c.did == 1
|
||||
# cram the deck again
|
||||
@ -702,12 +703,12 @@ def test_cram_rem():
|
||||
c = d.sched.getCard()
|
||||
d.sched.answerCard(c, 2)
|
||||
# answering the card will put it in the learning queue
|
||||
assert c.type == c.queue == 1
|
||||
assert c.type == CARD_TYPE_LRN and c.queue == QUEUE_TYPE_LRN
|
||||
assert c.due != oldDue
|
||||
# if we terminate cramming prematurely it should be set back to new
|
||||
d.sched.emptyDyn(did)
|
||||
c.load()
|
||||
assert c.type == c.queue == 0
|
||||
assert c.type == CARD_TYPE_NEW and c.queue == QUEUE_TYPE_NEW
|
||||
assert c.due == oldDue
|
||||
|
||||
|
||||
@ -731,10 +732,11 @@ def test_cram_resched():
|
||||
assert ni(c, 3) == 0
|
||||
assert d.sched.nextIvlStr(c, 3) == "(end)"
|
||||
d.sched.answerCard(c, 3)
|
||||
assert c.queue == c.type == 0
|
||||
assert c.type == CARD_TYPE_NEW and c.queue == QUEUE_TYPE_NEW
|
||||
# undue reviews should also be unaffected
|
||||
c.ivl = 100
|
||||
c.type = c.queue = 2
|
||||
c.queue = CARD_TYPE_REV
|
||||
c.type = QUEUE_TYPE_REV
|
||||
c.due = d.sched.today + 25
|
||||
c.factor = STARTING_FACTOR
|
||||
c.flush()
|
||||
@ -911,8 +913,8 @@ def test_repCounts():
|
||||
f["Front"] = "three"
|
||||
d.addNote(f)
|
||||
c = f.cards()[0]
|
||||
c.type = 2
|
||||
c.queue = 2
|
||||
c.type = CARD_TYPE_REV
|
||||
c.queue = QUEUE_TYPE_REV
|
||||
c.due = d.sched.today
|
||||
c.flush()
|
||||
d.reset()
|
||||
@ -929,8 +931,8 @@ def test_timing():
|
||||
f["Front"] = "num" + str(i)
|
||||
d.addNote(f)
|
||||
c = f.cards()[0]
|
||||
c.type = 2
|
||||
c.queue = 2
|
||||
c.type = CARD_TYPE_REV
|
||||
c.queue = QUEUE_TYPE_REV
|
||||
c.due = 0
|
||||
c.flush()
|
||||
# fail the first one
|
||||
@ -941,7 +943,7 @@ def test_timing():
|
||||
d.sched.answerCard(c, 1)
|
||||
# the next card should be another review
|
||||
c = d.sched.getCard()
|
||||
assert c.queue == 2
|
||||
assert c.queue == QUEUE_TYPE_REV
|
||||
# but if we wait for a second, the failed card should come back
|
||||
orig_time = time.time
|
||||
|
||||
@ -950,7 +952,7 @@ def test_timing():
|
||||
|
||||
time.time = adjusted_time
|
||||
c = d.sched.getCard()
|
||||
assert c.queue == 1
|
||||
assert c.queue == QUEUE_TYPE_LRN
|
||||
time.time = orig_time
|
||||
|
||||
|
||||
@ -982,7 +984,7 @@ def test_deckDue():
|
||||
d.addNote(f)
|
||||
# make it a review card
|
||||
c = f.cards()[0]
|
||||
c.queue = 2
|
||||
c.queue = QUEUE_TYPE_REV
|
||||
c.due = 0
|
||||
c.flush()
|
||||
# add one more with a new deck
|
||||
@ -1100,8 +1102,8 @@ def test_forget():
|
||||
f["Front"] = "one"
|
||||
d.addNote(f)
|
||||
c = f.cards()[0]
|
||||
c.queue = 2
|
||||
c.type = 2
|
||||
c.queue = QUEUE_TYPE_REV
|
||||
c.type = CARD_TYPE_REV
|
||||
c.ivl = 100
|
||||
c.due = 0
|
||||
c.flush()
|
||||
@ -1122,7 +1124,7 @@ def test_resched():
|
||||
c.load()
|
||||
assert c.due == d.sched.today
|
||||
assert c.ivl == 1
|
||||
assert c.queue == c.type == 2
|
||||
assert c.queue == CARD_TYPE_REV and c.type == QUEUE_TYPE_REV
|
||||
d.sched.reschedCards([c.id], 1, 1)
|
||||
c.load()
|
||||
assert c.due == d.sched.today + 1
|
||||
@ -1136,8 +1138,8 @@ def test_norelearn():
|
||||
f["Front"] = "one"
|
||||
d.addNote(f)
|
||||
c = f.cards()[0]
|
||||
c.type = 2
|
||||
c.queue = 2
|
||||
c.type = CARD_TYPE_REV
|
||||
c.queue = QUEUE_TYPE_REV
|
||||
c.due = 0
|
||||
c.factor = STARTING_FACTOR
|
||||
c.reps = 3
|
||||
@ -1158,8 +1160,8 @@ def test_failmult():
|
||||
f["Back"] = "two"
|
||||
d.addNote(f)
|
||||
c = f.cards()[0]
|
||||
c.type = 2
|
||||
c.queue = 2
|
||||
c.type = CARD_TYPE_REV
|
||||
c.queue = QUEUE_TYPE_REV
|
||||
c.ivl = 100
|
||||
c.due = d.sched.today - c.ivl
|
||||
c.factor = STARTING_FACTOR
|
||||
|
@ -4,7 +4,7 @@ import copy
|
||||
import time
|
||||
|
||||
from anki import hooks
|
||||
from anki.consts import STARTING_FACTOR
|
||||
from anki.consts import *
|
||||
from anki.utils import intTime
|
||||
from tests.shared import getEmptyCol as getEmptyColOrig
|
||||
|
||||
@ -57,13 +57,13 @@ def test_new():
|
||||
# fetch it
|
||||
c = d.sched.getCard()
|
||||
assert c
|
||||
assert c.queue == 0
|
||||
assert c.type == 0
|
||||
assert c.queue == QUEUE_TYPE_NEW
|
||||
assert c.type == CARD_TYPE_NEW
|
||||
# if we answer it, it should become a learn card
|
||||
t = intTime()
|
||||
d.sched.answerCard(c, 1)
|
||||
assert c.queue == 1
|
||||
assert c.type == 1
|
||||
assert c.queue == QUEUE_TYPE_LRN
|
||||
assert c.type == CARD_TYPE_LRN
|
||||
assert c.due >= t
|
||||
|
||||
# disabled for now, as the learn fudging makes this randomly fail
|
||||
@ -176,11 +176,11 @@ def test_learn():
|
||||
assert c.left % 1000 == 1
|
||||
assert c.left // 1000 == 1
|
||||
# the next pass should graduate the card
|
||||
assert c.queue == 1
|
||||
assert c.type == 1
|
||||
assert c.queue == QUEUE_TYPE_LRN
|
||||
assert c.type == CARD_TYPE_LRN
|
||||
d.sched.answerCard(c, 3)
|
||||
assert c.queue == 2
|
||||
assert c.type == 2
|
||||
assert c.queue == QUEUE_TYPE_REV
|
||||
assert c.type == CARD_TYPE_REV
|
||||
# should be due tomorrow, with an interval of 1
|
||||
assert c.due == d.sched.today + 1
|
||||
assert c.ivl == 1
|
||||
@ -188,8 +188,8 @@ def test_learn():
|
||||
c.type = 0
|
||||
c.queue = 1
|
||||
d.sched.answerCard(c, 4)
|
||||
assert c.type == 2
|
||||
assert c.queue == 2
|
||||
assert c.type == CARD_TYPE_REV
|
||||
assert c.queue == QUEUE_TYPE_REV
|
||||
assert checkRevIvl(d, c, 4)
|
||||
# revlog should have been updated each time
|
||||
assert d.db.scalar("select count() from revlog where type = 0") == 5
|
||||
@ -203,20 +203,21 @@ def test_relearn():
|
||||
c = f.cards()[0]
|
||||
c.ivl = 100
|
||||
c.due = d.sched.today
|
||||
c.type = c.queue = 2
|
||||
c.queue = CARD_TYPE_REV
|
||||
c.type = QUEUE_TYPE_REV
|
||||
c.flush()
|
||||
|
||||
# fail the card
|
||||
d.reset()
|
||||
c = d.sched.getCard()
|
||||
d.sched.answerCard(c, 1)
|
||||
assert c.queue == 1
|
||||
assert c.type == 3
|
||||
assert c.queue == QUEUE_TYPE_LRN
|
||||
assert c.type == CARD_TYPE_RELEARNING
|
||||
assert c.ivl == 1
|
||||
|
||||
# immediately graduate it
|
||||
d.sched.answerCard(c, 4)
|
||||
assert c.queue == c.type == 2
|
||||
assert c.queue == CARD_TYPE_REV and c.type == QUEUE_TYPE_REV
|
||||
assert c.ivl == 2
|
||||
assert c.due == d.sched.today + c.ivl
|
||||
|
||||
@ -229,7 +230,8 @@ def test_relearn_no_steps():
|
||||
c = f.cards()[0]
|
||||
c.ivl = 100
|
||||
c.due = d.sched.today
|
||||
c.type = c.queue = 2
|
||||
c.queue = CARD_TYPE_REV
|
||||
c.type = QUEUE_TYPE_REV
|
||||
c.flush()
|
||||
|
||||
conf = d.decks.confForDid(1)
|
||||
@ -240,7 +242,7 @@ def test_relearn_no_steps():
|
||||
d.reset()
|
||||
c = d.sched.getCard()
|
||||
d.sched.answerCard(c, 1)
|
||||
assert c.type == c.queue == 2
|
||||
assert c.queue == CARD_TYPE_REV and c.type == QUEUE_TYPE_REV
|
||||
|
||||
|
||||
def test_learn_collapsed():
|
||||
@ -291,7 +293,7 @@ def test_learn_day():
|
||||
# answering it will place it in queue 3
|
||||
d.sched.answerCard(c, 3)
|
||||
assert c.due == d.sched.today + 1
|
||||
assert c.queue == 3
|
||||
assert c.queue == QUEUE_TYPE_DAY_LEARN_RELEARN
|
||||
assert not d.sched.getCard()
|
||||
# for testing, move it back a day
|
||||
c.due -= 1
|
||||
@ -303,7 +305,7 @@ def test_learn_day():
|
||||
assert ni(c, 3) == 86400 * 2
|
||||
# if we fail it, it should be back in the correct queue
|
||||
d.sched.answerCard(c, 1)
|
||||
assert c.queue == 1
|
||||
assert c.queue == QUEUE_TYPE_LRN
|
||||
d.undo()
|
||||
d.reset()
|
||||
c = d.sched.getCard()
|
||||
@ -315,7 +317,7 @@ def test_learn_day():
|
||||
# the last pass should graduate it into a review card
|
||||
assert ni(c, 3) == 86400
|
||||
d.sched.answerCard(c, 3)
|
||||
assert c.queue == c.type == 2
|
||||
assert c.queue == CARD_TYPE_REV and c.type == QUEUE_TYPE_REV
|
||||
# if the lapse step is tomorrow, failing it should handle the counts
|
||||
# correctly
|
||||
c.due = 0
|
||||
@ -325,7 +327,7 @@ def test_learn_day():
|
||||
d.sched._cardConf(c)["lapse"]["delays"] = [1440]
|
||||
c = d.sched.getCard()
|
||||
d.sched.answerCard(c, 1)
|
||||
assert c.queue == 3
|
||||
assert c.queue == QUEUE_TYPE_DAY_LEARN_RELEARN
|
||||
assert d.sched.counts() == (0, 0, 0)
|
||||
|
||||
|
||||
@ -338,8 +340,8 @@ def test_reviews():
|
||||
d.addNote(f)
|
||||
# set the card up as a review card, due 8 days ago
|
||||
c = f.cards()[0]
|
||||
c.type = 2
|
||||
c.queue = 2
|
||||
c.type = CARD_TYPE_REV
|
||||
c.queue = QUEUE_TYPE_REV
|
||||
c.due = d.sched.today - 8
|
||||
c.factor = STARTING_FACTOR
|
||||
c.reps = 3
|
||||
@ -355,7 +357,7 @@ def test_reviews():
|
||||
c.flush()
|
||||
d.reset()
|
||||
d.sched.answerCard(c, 2)
|
||||
assert c.queue == 2
|
||||
assert c.queue == QUEUE_TYPE_REV
|
||||
# the new interval should be (100) * 1.2 = 120
|
||||
assert checkRevIvl(d, c, 120)
|
||||
assert c.due == d.sched.today + c.ivl
|
||||
@ -398,9 +400,9 @@ def test_reviews():
|
||||
hooks.card_did_leech.append(onLeech)
|
||||
d.sched.answerCard(c, 1)
|
||||
assert hooked
|
||||
assert c.queue == -1
|
||||
assert c.queue == QUEUE_TYPE_SUSPENDED
|
||||
c.load()
|
||||
assert c.queue == -1
|
||||
assert c.queue == QUEUE_TYPE_SUSPENDED
|
||||
|
||||
|
||||
def test_review_limits():
|
||||
@ -432,7 +434,8 @@ def test_review_limits():
|
||||
|
||||
# make them reviews
|
||||
c = f.cards()[0]
|
||||
c.queue = c.type = 2
|
||||
c.queue = CARD_TYPE_REV
|
||||
c.type = QUEUE_TYPE_REV
|
||||
c.due = 0
|
||||
c.flush()
|
||||
|
||||
@ -474,8 +477,8 @@ def test_button_spacing():
|
||||
d.addNote(f)
|
||||
# 1 day ivl review card due now
|
||||
c = f.cards()[0]
|
||||
c.type = 2
|
||||
c.queue = 2
|
||||
c.type = CARD_TYPE_REV
|
||||
c.queue = QUEUE_TYPE_REV
|
||||
c.due = d.sched.today
|
||||
c.reps = 1
|
||||
c.ivl = 1
|
||||
@ -503,7 +506,7 @@ def test_overdue_lapse():
|
||||
d.addNote(f)
|
||||
# simulate a review that was lapsed and is now due for its normal review
|
||||
c = f.cards()[0]
|
||||
c.type = 2
|
||||
c.type = CARD_TYPE_REV
|
||||
c.queue = 1
|
||||
c.due = -1
|
||||
c.odue = -1
|
||||
@ -586,7 +589,7 @@ def test_nextIvl():
|
||||
assert ni(c, 4) == 4 * 86400
|
||||
# lapsed cards
|
||||
##################################################
|
||||
c.type = 2
|
||||
c.type = CARD_TYPE_REV
|
||||
c.ivl = 100
|
||||
c.factor = STARTING_FACTOR
|
||||
assert ni(c, 1) == 60
|
||||
@ -594,7 +597,7 @@ def test_nextIvl():
|
||||
assert ni(c, 4) == 101 * 86400
|
||||
# review cards
|
||||
##################################################
|
||||
c.queue = 2
|
||||
c.queue = QUEUE_TYPE_REV
|
||||
c.ivl = 100
|
||||
c.factor = STARTING_FACTOR
|
||||
# failing it should put it at 60s
|
||||
@ -624,25 +627,25 @@ def test_bury():
|
||||
# burying
|
||||
d.sched.buryCards([c.id], manual=True) # pylint: disable=unexpected-keyword-arg
|
||||
c.load()
|
||||
assert c.queue == -3
|
||||
assert c.queue == QUEUE_TYPE_MANUALLY_BURIED
|
||||
d.sched.buryCards([c2.id], manual=False) # pylint: disable=unexpected-keyword-arg
|
||||
c2.load()
|
||||
assert c2.queue == -2
|
||||
assert c2.queue == QUEUE_TYPE_SIBLING_BURIED
|
||||
|
||||
d.reset()
|
||||
assert not d.sched.getCard()
|
||||
|
||||
d.sched.unburyCardsForDeck(type="manual") # pylint: disable=unexpected-keyword-arg
|
||||
c.load()
|
||||
assert c.queue == 0
|
||||
assert c.queue == QUEUE_TYPE_NEW
|
||||
c2.load()
|
||||
assert c2.queue == -2
|
||||
assert c2.queue == QUEUE_TYPE_SIBLING_BURIED
|
||||
|
||||
d.sched.unburyCardsForDeck( # pylint: disable=unexpected-keyword-arg
|
||||
type="siblings"
|
||||
)
|
||||
c2.load()
|
||||
assert c2.queue == 0
|
||||
assert c2.queue == QUEUE_TYPE_NEW
|
||||
|
||||
d.sched.buryCards([c.id, c2.id])
|
||||
d.sched.unburyCardsForDeck(type="all") # pylint: disable=unexpected-keyword-arg
|
||||
@ -671,21 +674,21 @@ def test_suspend():
|
||||
# should cope with rev cards being relearnt
|
||||
c.due = 0
|
||||
c.ivl = 100
|
||||
c.type = 2
|
||||
c.queue = 2
|
||||
c.type = CARD_TYPE_REV
|
||||
c.queue = QUEUE_TYPE_REV
|
||||
c.flush()
|
||||
d.reset()
|
||||
c = d.sched.getCard()
|
||||
d.sched.answerCard(c, 1)
|
||||
assert c.due >= time.time()
|
||||
due = c.due
|
||||
assert c.queue == 1
|
||||
assert c.type == 3
|
||||
assert c.queue == QUEUE_TYPE_LRN
|
||||
assert c.type == CARD_TYPE_RELEARNING
|
||||
d.sched.suspendCards([c.id])
|
||||
d.sched.unsuspendCards([c.id])
|
||||
c.load()
|
||||
assert c.queue == 1
|
||||
assert c.type == 3
|
||||
assert c.queue == QUEUE_TYPE_LRN
|
||||
assert c.type == CARD_TYPE_RELEARNING
|
||||
assert c.due == due
|
||||
# should cope with cards in cram decks
|
||||
c.due = 1
|
||||
@ -709,7 +712,8 @@ def test_filt_reviewing_early_normal():
|
||||
d.addNote(f)
|
||||
c = f.cards()[0]
|
||||
c.ivl = 100
|
||||
c.type = c.queue = 2
|
||||
c.queue = CARD_TYPE_REV
|
||||
c.type = QUEUE_TYPE_REV
|
||||
# due in 25 days, so it's been waiting 75 days
|
||||
c.due = d.sched.today + 25
|
||||
c.mod = 1
|
||||
@ -740,7 +744,7 @@ def test_filt_reviewing_early_normal():
|
||||
assert c.due == d.sched.today + c.ivl
|
||||
assert not c.odue
|
||||
# should not be in learning
|
||||
assert c.queue == 2
|
||||
assert c.queue == QUEUE_TYPE_REV
|
||||
# should be logged as a cram rep
|
||||
assert d.db.scalar("select type from revlog order by id desc limit 1") == 3
|
||||
|
||||
@ -771,11 +775,11 @@ def test_filt_keep_lrn_state():
|
||||
|
||||
d.sched.answerCard(c, 1)
|
||||
|
||||
assert c.type == c.queue == 1
|
||||
assert c.type == CARD_TYPE_LRN and c.queue == QUEUE_TYPE_LRN
|
||||
assert c.left == 3003
|
||||
|
||||
d.sched.answerCard(c, 3)
|
||||
assert c.type == c.queue == 1
|
||||
assert c.type == CARD_TYPE_LRN and c.queue == QUEUE_TYPE_LRN
|
||||
|
||||
# create a dynamic deck and refresh it
|
||||
did = d.decks.newDyn("Cram")
|
||||
@ -784,7 +788,7 @@ def test_filt_keep_lrn_state():
|
||||
|
||||
# card should still be in learning state
|
||||
c.load()
|
||||
assert c.type == c.queue == 1
|
||||
assert c.type == CARD_TYPE_LRN and c.queue == QUEUE_TYPE_LRN
|
||||
assert c.left == 2002
|
||||
|
||||
# should be able to advance learning steps
|
||||
@ -795,7 +799,7 @@ def test_filt_keep_lrn_state():
|
||||
# emptying the deck preserves learning state
|
||||
d.sched.emptyDyn(did)
|
||||
c.load()
|
||||
assert c.type == c.queue == 1
|
||||
assert c.type == CARD_TYPE_LRN and c.queue == QUEUE_TYPE_LRN
|
||||
assert c.left == 1001
|
||||
assert c.due - intTime() > 60 * 60
|
||||
|
||||
@ -833,9 +837,9 @@ def test_preview():
|
||||
|
||||
# passing it will remove it
|
||||
d.sched.answerCard(c2, 2)
|
||||
assert c2.queue == 0
|
||||
assert c2.queue == QUEUE_TYPE_NEW
|
||||
assert c2.reps == 0
|
||||
assert c2.type == 0
|
||||
assert c2.type == CARD_TYPE_NEW
|
||||
|
||||
# the other card should appear again
|
||||
c = d.sched.getCard()
|
||||
@ -844,9 +848,9 @@ def test_preview():
|
||||
# emptying the filtered deck should restore card
|
||||
d.sched.emptyDyn(did)
|
||||
c.load()
|
||||
assert c.queue == 0
|
||||
assert c.queue == QUEUE_TYPE_NEW
|
||||
assert c.reps == 0
|
||||
assert c.type == 0
|
||||
assert c.type == CARD_TYPE_NEW
|
||||
|
||||
|
||||
def test_ordcycle():
|
||||
@ -943,8 +947,8 @@ def test_repCounts():
|
||||
f["Front"] = "three"
|
||||
d.addNote(f)
|
||||
c = f.cards()[0]
|
||||
c.type = 2
|
||||
c.queue = 2
|
||||
c.type = CARD_TYPE_REV
|
||||
c.queue = QUEUE_TYPE_REV
|
||||
c.due = d.sched.today
|
||||
c.flush()
|
||||
d.reset()
|
||||
@ -961,8 +965,8 @@ def test_timing():
|
||||
f["Front"] = "num" + str(i)
|
||||
d.addNote(f)
|
||||
c = f.cards()[0]
|
||||
c.type = 2
|
||||
c.queue = 2
|
||||
c.type = CARD_TYPE_REV
|
||||
c.queue = QUEUE_TYPE_REV
|
||||
c.due = 0
|
||||
c.flush()
|
||||
# fail the first one
|
||||
@ -971,13 +975,13 @@ def test_timing():
|
||||
d.sched.answerCard(c, 1)
|
||||
# the next card should be another review
|
||||
c2 = d.sched.getCard()
|
||||
assert c2.queue == 2
|
||||
assert c2.queue == QUEUE_TYPE_REV
|
||||
# if the failed card becomes due, it should show first
|
||||
c.due = time.time() - 1
|
||||
c.flush()
|
||||
d.reset()
|
||||
c = d.sched.getCard()
|
||||
assert c.queue == 1
|
||||
assert c.queue == QUEUE_TYPE_LRN
|
||||
|
||||
|
||||
def test_collapse():
|
||||
@ -1008,7 +1012,7 @@ def test_deckDue():
|
||||
d.addNote(f)
|
||||
# make it a review card
|
||||
c = f.cards()[0]
|
||||
c.queue = 2
|
||||
c.queue = QUEUE_TYPE_REV
|
||||
c.due = 0
|
||||
c.flush()
|
||||
# add one more with a new deck
|
||||
@ -1126,8 +1130,8 @@ def test_forget():
|
||||
f["Front"] = "one"
|
||||
d.addNote(f)
|
||||
c = f.cards()[0]
|
||||
c.queue = 2
|
||||
c.type = 2
|
||||
c.queue = QUEUE_TYPE_REV
|
||||
c.type = CARD_TYPE_REV
|
||||
c.ivl = 100
|
||||
c.due = 0
|
||||
c.flush()
|
||||
@ -1148,7 +1152,7 @@ def test_resched():
|
||||
c.load()
|
||||
assert c.due == d.sched.today
|
||||
assert c.ivl == 1
|
||||
assert c.queue == c.type == 2
|
||||
assert c.queue == QUEUE_TYPE_REV and c.type == CARD_TYPE_REV
|
||||
d.sched.reschedCards([c.id], 1, 1)
|
||||
c.load()
|
||||
assert c.due == d.sched.today + 1
|
||||
@ -1162,8 +1166,8 @@ def test_norelearn():
|
||||
f["Front"] = "one"
|
||||
d.addNote(f)
|
||||
c = f.cards()[0]
|
||||
c.type = 2
|
||||
c.queue = 2
|
||||
c.type = CARD_TYPE_REV
|
||||
c.queue = QUEUE_TYPE_REV
|
||||
c.due = 0
|
||||
c.factor = STARTING_FACTOR
|
||||
c.reps = 3
|
||||
@ -1184,8 +1188,8 @@ def test_failmult():
|
||||
f["Back"] = "two"
|
||||
d.addNote(f)
|
||||
c = f.cards()[0]
|
||||
c.type = 2
|
||||
c.queue = 2
|
||||
c.type = CARD_TYPE_REV
|
||||
c.queue = QUEUE_TYPE_REV
|
||||
c.ivl = 100
|
||||
c.due = d.sched.today - c.ivl
|
||||
c.factor = STARTING_FACTOR
|
||||
@ -1217,8 +1221,8 @@ def test_moveVersions():
|
||||
# the move to v2 should reset it to new
|
||||
col.changeSchedulerVer(2)
|
||||
c.load()
|
||||
assert c.queue == 0
|
||||
assert c.type == 0
|
||||
assert c.queue == QUEUE_TYPE_NEW
|
||||
assert c.type == CARD_TYPE_NEW
|
||||
|
||||
# fail it again, and manually bury it
|
||||
col.reset()
|
||||
@ -1226,19 +1230,19 @@ def test_moveVersions():
|
||||
col.sched.answerCard(c, 1)
|
||||
col.sched.buryCards([c.id])
|
||||
c.load()
|
||||
assert c.queue == -3
|
||||
assert c.queue == QUEUE_TYPE_MANUALLY_BURIED
|
||||
|
||||
# revert to version 1
|
||||
col.changeSchedulerVer(1)
|
||||
|
||||
# card should have moved queues
|
||||
c.load()
|
||||
assert c.queue == -2
|
||||
assert c.queue == QUEUE_TYPE_SIBLING_BURIED
|
||||
|
||||
# and it should be new again when unburied
|
||||
col.sched.unburyCards()
|
||||
c.load()
|
||||
assert c.queue == c.type == 0
|
||||
assert c.type == CARD_TYPE_NEW and c.queue == QUEUE_TYPE_NEW
|
||||
|
||||
# make sure relearning cards transition correctly to v1
|
||||
col.changeSchedulerVer(2)
|
||||
@ -1269,7 +1273,7 @@ def test_negativeDueFilter():
|
||||
d.addNote(f)
|
||||
c = f.cards()[0]
|
||||
c.due = -5
|
||||
c.queue = 2
|
||||
c.queue = QUEUE_TYPE_REV
|
||||
c.ivl = 5
|
||||
c.flush()
|
||||
|
||||
|
@ -55,18 +55,18 @@ def test_review():
# answer
assert d.sched.counts() == (1, 0, 0)
c = d.sched.getCard()
assert c.queue == 0
assert c.queue == QUEUE_TYPE_NEW
d.sched.answerCard(c, 3)
assert c.left == 1001
assert d.sched.counts() == (0, 1, 0)
assert c.queue == 1
assert c.queue == QUEUE_TYPE_LRN
# undo
assert d.undoName()
d.undo()
d.reset()
assert d.sched.counts() == (1, 0, 0)
c.load()
assert c.queue == 0
assert c.queue == QUEUE_TYPE_NEW
assert c.left != 1001
assert not d.undoName()
# we should be able to undo multiple answers too
@ -50,6 +50,12 @@ hooks = [
    ),
    Hook(name="sync_stage_did_change", args=["stage: str"], legacy_hook="sync"),
    Hook(name="sync_progress_did_change", args=["msg: str"], legacy_hook="syncMsg"),
    Hook(
        name="bg_thread_progress_callback",
        args=["proceed: bool", "progress: anki.rsbackend.Progress"],
        return_type="bool",
        doc="Warning: this is called on a background thread.",
    ),
    Hook(
        name="tag_added", args=["tag: str"], legacy_hook="newTag", legacy_no_args=True,
    ),
@ -67,6 +73,16 @@ hooks = [
        Your add-on can check filter_name to decide whether it should modify
        field_text or not before returning it.""",
    ),
    Hook(
        name="note_will_flush",
        args=["note: Note"],
        doc="Allow to change a note before it is added/updated in the database.",
    ),
    Hook(
        name="card_will_flush",
        args=["card: Card"],
        doc="Allow to change a card before it is added/updated in the database.",
    ),
    Hook(
        name="card_did_render",
        args=[
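
A usage sketch for the new note/card flush hooks defined above; the tag-tidying body is an assumed example, not part of this commit:

    from anki import hooks
    from anki.notes import Note

    def on_note_will_flush(note: Note) -> None:
        # assumed example: tidy tags before the note is written
        note.tags = sorted(set(t.strip() for t in note.tags))

    hooks.note_will_flush.append(on_note_will_flush)
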
@ -139,7 +139,7 @@ class {self.classname()}:
|
||||
if self.legacy_hook:
|
||||
out += f"""\
|
||||
# legacy support
|
||||
runFilter({self.legacy_args()})
|
||||
{arg_names[0]} = runFilter({self.legacy_args()})
|
||||
"""
|
||||
|
||||
out += f"""\
|
||||
|
@ -25,8 +25,8 @@ all: check
|
||||
./tools/build_ui.sh
|
||||
@touch $@
|
||||
|
||||
.build/i18n: $(wildcard i18n/translations/anki.pot/*)
|
||||
(cd i18n && ./build-mo-files && ./copy-qt-files)
|
||||
.build/i18n: $(wildcard i18n/po/desktop/*/anki.po) $(wildcard i18n/ftl/core/*/*.ftl)
|
||||
(cd i18n && ./pull-git && ./build-mo-files && ./copy-qt-files && ./copy-ftl-files)
|
||||
@touch $@
|
||||
|
||||
TSDEPS := $(wildcard ts/src/*.ts) $(wildcard ts/scss/*.scss)
|
||||
|
@ -64,7 +64,7 @@ except ImportError as e:
|
||||
|
||||
|
||||
from aqt import addcards, browser, editcurrent # isort:skip
|
||||
from aqt import stats, about, preferences # isort:skip
|
||||
from aqt import stats, about, preferences, mediasync # isort:skip
|
||||
|
||||
|
||||
class DialogManager:
|
||||
@ -76,6 +76,7 @@ class DialogManager:
|
||||
"DeckStats": [stats.DeckStats, None],
|
||||
"About": [about.show, None],
|
||||
"Preferences": [preferences.Preferences, None],
|
||||
"sync_log": [mediasync.MediaSyncDialog, None],
|
||||
}
|
||||
|
||||
def open(self, name, *args):
|
||||
@ -147,8 +148,8 @@ def setupLang(
|
||||
locale.setlocale(locale.LC_ALL, "")
|
||||
except:
|
||||
pass
|
||||
lang = force or pm.meta["defaultLang"]
|
||||
|
||||
# add _ and ngettext globals used by legacy code
|
||||
def fn__(arg):
|
||||
print("accessing _ without importing from anki.lang will break in the future")
|
||||
print("".join(traceback.format_stack()[-2]))
|
||||
@ -167,15 +168,26 @@ def setupLang(
|
||||
|
||||
builtins.__dict__["_"] = fn__
|
||||
builtins.__dict__["ngettext"] = fn_ngettext
|
||||
|
||||
# get lang and normalize into ja/zh-CN form
|
||||
lang = force or pm.meta["defaultLang"]
|
||||
lang = anki.lang.lang_to_disk_lang(lang)
|
||||
|
||||
# load gettext catalog
|
||||
ldir = locale_dir()
|
||||
anki.lang.setLang(lang, ldir, local=False)
|
||||
anki.lang.set_lang(lang, ldir)
|
||||
|
||||
# switch direction for RTL languages
|
||||
if lang in ("he", "ar", "fa"):
|
||||
app.setLayoutDirection(Qt.RightToLeft)
|
||||
else:
|
||||
app.setLayoutDirection(Qt.LeftToRight)
|
||||
# qt
|
||||
|
||||
# load qt translations
|
||||
_qtrans = QTranslator()
|
||||
if _qtrans.load("qt_" + lang, ldir):
|
||||
qt_dir = os.path.join(ldir, "qt")
|
||||
qt_lang = lang.replace("-", "_")
|
||||
if _qtrans.load("qtbase_" + qt_lang, qt_dir):
|
||||
app.installTranslator(_qtrans)
|
||||
|
||||
|
||||
|
@ -219,5 +219,5 @@ suggestions, bug reports and donations."
|
||||
abt.label.setMinimumWidth(800)
|
||||
abt.label.setMinimumHeight(600)
|
||||
dialog.show()
|
||||
abt.label.stdHtml(abouttext, js=" ")
|
||||
abt.label.stdHtml(abouttext, js=[])
|
||||
return dialog
|
||||
|
@ -725,7 +725,7 @@ class AddonsDialog(QDialog):
|
||||
if min is not None and min > current_point_version:
|
||||
return f"Anki >= 2.1.{min}"
|
||||
else:
|
||||
max = addon.max_point_version
|
||||
max = abs(addon.max_point_version)
|
||||
return f"Anki <= 2.1.{max}"
|
||||
|
||||
def should_grey(self, addon: AddonMeta):
|
||||
|
@ -11,6 +11,7 @@ import sre_constants
|
||||
import time
|
||||
import unicodedata
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
from operator import itemgetter
|
||||
from typing import Callable, List, Optional, Union
|
||||
|
||||
@ -23,6 +24,7 @@ from anki.consts import *
|
||||
from anki.lang import _, ngettext
|
||||
from anki.models import NoteType
|
||||
from anki.notes import Note
|
||||
from anki.rsbackend import StringsGroup
|
||||
from anki.utils import fmtTimeSpan, htmlToTextLine, ids2str, intTime, isMac, isWin
|
||||
from aqt import AnkiQt, gui_hooks
|
||||
from aqt.editor import Editor
|
||||
@ -50,6 +52,7 @@ from aqt.utils import (
|
||||
showInfo,
|
||||
showWarning,
|
||||
tooltip,
|
||||
tr,
|
||||
)
|
||||
from aqt.webview import AnkiWebView
|
||||
|
||||
@ -350,11 +353,13 @@ class DataModel(QAbstractTableModel):
|
||||
def nextDue(self, c, index):
|
||||
if c.odid:
|
||||
return _("(filtered)")
|
||||
elif c.queue == 1:
|
||||
elif c.queue == QUEUE_TYPE_LRN:
|
||||
date = c.due
|
||||
elif c.queue == 0 or c.type == 0:
|
||||
return str(c.due)
|
||||
elif c.queue in (2, 3) or (c.type == 2 and c.queue < 0):
|
||||
elif c.queue == QUEUE_TYPE_NEW or c.type == CARD_TYPE_NEW:
|
||||
return tr(StringsGroup.STATISTICS, "due-for-new-card", number=c.due)
|
||||
elif c.queue in (QUEUE_TYPE_REV, QUEUE_TYPE_DAY_LEARN_RELEARN) or (
|
||||
c.type == CARD_TYPE_REV and c.queue < 0
|
||||
):
|
||||
date = time.time() + ((c.due - self.col.sched.today) * 86400)
|
||||
else:
|
||||
return ""
|
||||
@ -416,6 +421,15 @@ class StatusDelegate(QItemDelegate):
|
||||
######################################################################
|
||||
|
||||
|
||||
class SidebarStage(Enum):
|
||||
ROOT = 0
|
||||
STANDARD = 1
|
||||
FAVORITES = 2
|
||||
DECKS = 3
|
||||
MODELS = 4
|
||||
TAGS = 5
|
||||
|
||||
|
||||
class SidebarItem:
|
||||
def __init__(
|
||||
self,
|
||||
@ -716,7 +730,7 @@ class Browser(QMainWindow):
|
||||
("noteCrt", _("Created")),
|
||||
("noteMod", _("Edited")),
|
||||
("cardMod", _("Changed")),
|
||||
("cardDue", _("Due")),
|
||||
("cardDue", tr(StringsGroup.STATISTICS, "due-date")),
|
||||
("cardIvl", _("Interval")),
|
||||
("cardEase", _("Ease")),
|
||||
("cardReps", _("Reviews")),
|
||||
@ -776,10 +790,11 @@ class Browser(QMainWindow):
|
||||
def search(self) -> None:
|
||||
if "is:current" in self._lastSearchTxt:
|
||||
# show current card if there is one
|
||||
c = self.mw.reviewer.card
|
||||
self.card = self.mw.reviewer.card
|
||||
c = self.card = self.mw.reviewer.card
|
||||
nid = c and c.nid or 0
|
||||
self.model.search("nid:%d" % nid)
|
||||
if nid:
|
||||
self.model.search("nid:%d" % nid)
|
||||
self.focusCid(c.id)
|
||||
else:
|
||||
self.model.search(self._lastSearchTxt)
|
||||
|
||||
@ -1083,11 +1098,27 @@ by clicking on one on the left."""
|
||||
|
||||
def buildTree(self) -> SidebarItem:
|
||||
root = SidebarItem("", "")
|
||||
self._stdTree(root)
|
||||
self._favTree(root)
|
||||
self._decksTree(root)
|
||||
self._modelTree(root)
|
||||
self._userTagTree(root)
|
||||
|
||||
handled = gui_hooks.browser_will_build_tree(
|
||||
False, root, SidebarStage.ROOT, self
|
||||
)
|
||||
if handled:
|
||||
return root
|
||||
|
||||
for stage, builder in zip(
|
||||
list(SidebarStage)[1:],
|
||||
(
|
||||
self._stdTree,
|
||||
self._favTree,
|
||||
self._decksTree,
|
||||
self._modelTree,
|
||||
self._userTagTree,
|
||||
),
|
||||
):
|
||||
handled = gui_hooks.browser_will_build_tree(False, root, stage, self)
|
||||
if not handled and builder:
|
||||
builder(root)
|
||||
|
||||
return root
|
||||
|
||||
def _stdTree(self, root) -> None:
|
||||
@ -1241,7 +1272,7 @@ by clicking on one on the left."""
|
||||
(_("New"), "is:new"),
|
||||
(_("Learning"), "is:learn"),
|
||||
(_("Review"), "is:review"),
|
||||
(_("Due"), "is:due"),
|
||||
(tr(StringsGroup.FILTERING, "is-due"), "is:due"),
|
||||
None,
|
||||
(_("Suspended"), "is:suspended"),
|
||||
(_("Buried"), "is:buried"),
|
||||
@ -1382,27 +1413,20 @@ by clicking on one on the left."""
|
||||
info, cs = self._cardInfoData()
|
||||
reps = self._revlogData(cs)
|
||||
|
||||
class CardInfoDialog(QDialog):
|
||||
silentlyClose = True
|
||||
|
||||
def reject(self):
|
||||
saveGeom(self, "revlog")
|
||||
return QDialog.reject(self)
|
||||
|
||||
d = CardInfoDialog(self)
|
||||
card_info_dialog = CardInfoDialog(self)
|
||||
l = QVBoxLayout()
|
||||
l.setContentsMargins(0, 0, 0, 0)
|
||||
w = AnkiWebView()
|
||||
w = AnkiWebView(title="browser card info")
|
||||
l.addWidget(w)
|
||||
w.stdHtml(info + "<p>" + reps)
|
||||
w.stdHtml(info + "<p>" + reps, context=card_info_dialog)
|
||||
bb = QDialogButtonBox(QDialogButtonBox.Close)
|
||||
l.addWidget(bb)
|
||||
bb.rejected.connect(d.reject)
|
||||
d.setLayout(l)
|
||||
d.setWindowModality(Qt.WindowModal)
|
||||
d.resize(500, 400)
|
||||
restoreGeom(d, "revlog")
|
||||
d.show()
|
||||
bb.rejected.connect(card_info_dialog.reject)
|
||||
card_info_dialog.setLayout(l)
|
||||
card_info_dialog.setWindowModality(Qt.WindowModal)
|
||||
card_info_dialog.resize(500, 400)
|
||||
restoreGeom(card_info_dialog, "revlog")
|
||||
card_info_dialog.show()
|
||||
|
||||
def _cardInfoData(self):
|
||||
from anki.stats import CardStats
|
||||
@ -1446,9 +1470,9 @@ border: 1px solid #000; padding: 3px; '>%s</div>"""
|
||||
import anki.stats as st
|
||||
|
||||
fmt = "<span style='color:%s'>%s</span>"
|
||||
if type == 0:
|
||||
if type == CARD_TYPE_NEW:
|
||||
tstr = fmt % (st.colLearn, tstr)
|
||||
elif type == 1:
|
||||
elif type == CARD_TYPE_LRN:
|
||||
tstr = fmt % (st.colMature, tstr)
|
||||
elif type == 2:
|
||||
tstr = fmt % (st.colRelearn, tstr)
|
||||
@ -1561,7 +1585,7 @@ where id in %s"""
|
||||
self._previewWindow.silentlyClose = True
|
||||
vbox = QVBoxLayout()
|
||||
vbox.setContentsMargins(0, 0, 0, 0)
|
||||
self._previewWeb = AnkiWebView()
|
||||
self._previewWeb = AnkiWebView(title="previewer")
|
||||
vbox.addWidget(self._previewWeb)
|
||||
bbox = QDialogButtonBox()
|
||||
|
||||
@ -1656,12 +1680,15 @@ where id in %s"""
|
||||
"mathjax/MathJax.js",
|
||||
"reviewer.js",
|
||||
]
|
||||
web_context = PreviewDialog(dialog=self._previewWindow, browser=self)
|
||||
self._previewWeb.stdHtml(
|
||||
self.mw.reviewer.revHtml(), css=["reviewer.css"], js=jsinc
|
||||
self.mw.reviewer.revHtml(),
|
||||
css=["reviewer.css"],
|
||||
js=jsinc,
|
||||
context=web_context,
|
||||
)
|
||||
self._previewWeb.set_bridge_command(
|
||||
self._on_preview_bridge_cmd,
|
||||
PreviewDialog(dialog=self._previewWindow, browser=self),
|
||||
self._on_preview_bridge_cmd, web_context,
|
||||
)
|
||||
|
||||
def _on_preview_bridge_cmd(self, cmd: str) -> Any:
|
||||
@ -1737,7 +1764,7 @@ where id in %s"""
|
||||
av_player.play_tags(audio)
|
||||
|
||||
txt = self.mw.prepare_card_text_for_display(txt)
|
||||
gui_hooks.card_will_show(
|
||||
txt = gui_hooks.card_will_show(
|
||||
txt, c, "preview" + self._previewState.capitalize()
|
||||
)
|
||||
self._lastPreviewState = self._previewStateAndMod()
|
||||
@ -1965,7 +1992,8 @@ update cards set usn=?, mod=?, did=? where id in """
|
||||
def _reposition(self):
|
||||
cids = self.selectedCards()
|
||||
cids2 = self.col.db.list(
|
||||
"select id from cards where type = 0 and id in " + ids2str(cids)
|
||||
f"select id from cards where type = {CARD_TYPE_NEW} and id in "
|
||||
+ ids2str(cids)
|
||||
)
|
||||
if not cids2:
|
||||
return showInfo(_("Only new cards can be repositioned."))
|
||||
@ -1974,7 +2002,7 @@ update cards set usn=?, mod=?, did=? where id in """
|
||||
frm = aqt.forms.reposition.Ui_Dialog()
|
||||
frm.setupUi(d)
|
||||
(pmin, pmax) = self.col.db.first(
|
||||
"select min(due), max(due) from cards where type=0 and odid=0"
|
||||
f"select min(due), max(due) from cards where type={CARD_TYPE_NEW} and odid=0"
|
||||
)
|
||||
pmin = pmin or 0
|
||||
pmax = pmax or 0
|
||||
@ -2158,10 +2186,10 @@ update cards set usn=?, mod=?, did=? where id in """
|
||||
frm.fields.addItems(fields)
|
||||
self._dupesButton = None
|
||||
# links
|
||||
frm.webView.set_bridge_command(
|
||||
self.dupeLinkClicked, FindDupesDialog(dialog=d, browser=self)
|
||||
)
|
||||
frm.webView.stdHtml("")
|
||||
frm.webView.title = "find duplicates"
|
||||
web_context = FindDupesDialog(dialog=d, browser=self)
|
||||
frm.webView.set_bridge_command(self.dupeLinkClicked, web_context)
|
||||
frm.webView.stdHtml("", context=web_context)
|
||||
|
||||
def onFin(code):
|
||||
saveGeom(d, "findDupes")
|
||||
@ -2170,13 +2198,15 @@ update cards set usn=?, mod=?, did=? where id in """
|
||||
|
||||
def onClick():
|
||||
field = fields[frm.fields.currentIndex()]
|
||||
self.duplicatesReport(frm.webView, field, frm.search.text(), frm)
|
||||
self.duplicatesReport(
|
||||
frm.webView, field, frm.search.text(), frm, web_context
|
||||
)
|
||||
|
||||
search = frm.buttonBox.addButton(_("Search"), QDialogButtonBox.ActionRole)
|
||||
search.clicked.connect(onClick)
|
||||
d.show()
|
||||
|
||||
def duplicatesReport(self, web, fname, search, frm):
|
||||
def duplicatesReport(self, web, fname, search, frm, web_context):
|
||||
self.mw.progress.start()
|
||||
res = self.mw.col.findDupes(fname, search)
|
||||
if not self._dupesButton:
|
||||
@ -2201,7 +2231,7 @@ update cards set usn=?, mod=?, did=? where id in """
|
||||
)
|
||||
)
|
||||
t += "</ol>"
|
||||
web.stdHtml(t)
|
||||
web.stdHtml(t, context=web_context)
|
||||
self.mw.progress.finish()
|
||||
|
||||
def _onTagDupes(self, res):
|
||||
@ -2475,3 +2505,19 @@ Are you sure you want to continue?"""
|
||||
|
||||
def onHelp(self):
|
||||
openHelp("browsermisc")
|
||||
|
||||
|
||||
# Card Info Dialog
|
||||
######################################################################
|
||||
|
||||
|
||||
class CardInfoDialog(QDialog):
|
||||
silentlyClose = True
|
||||
|
||||
def __init__(self, browser: Browser, *args, **kwargs):
|
||||
super().__init__(browser, *args, **kwargs)
|
||||
self.browser = browser
|
||||
|
||||
def reject(self):
|
||||
saveGeom(self, "revlog")
|
||||
return QDialog.reject(self)
|
||||
|
@ -203,9 +203,9 @@ class CardLayout(QDialog):
|
||||
|
||||
def setupWebviews(self):
|
||||
pform = self.pform
|
||||
pform.frontWeb = AnkiWebView()
|
||||
pform.frontWeb = AnkiWebView(title="card layout front")
|
||||
pform.frontPrevBox.addWidget(pform.frontWeb)
|
||||
pform.backWeb = AnkiWebView()
|
||||
pform.backWeb = AnkiWebView(title="card layout back")
|
||||
pform.backPrevBox.addWidget(pform.backWeb)
|
||||
jsinc = [
|
||||
"jquery.js",
|
||||
@ -215,10 +215,10 @@ class CardLayout(QDialog):
|
||||
"reviewer.js",
|
||||
]
|
||||
pform.frontWeb.stdHtml(
|
||||
self.mw.reviewer.revHtml(), css=["reviewer.css"], js=jsinc
|
||||
self.mw.reviewer.revHtml(), css=["reviewer.css"], js=jsinc, context=self,
|
||||
)
|
||||
pform.backWeb.stdHtml(
|
||||
self.mw.reviewer.revHtml(), css=["reviewer.css"], js=jsinc
|
||||
self.mw.reviewer.revHtml(), css=["reviewer.css"], js=jsinc, context=self,
|
||||
)
|
||||
pform.frontWeb.set_bridge_command(self._on_bridge_cmd, self)
|
||||
pform.backWeb.set_bridge_command(self._on_bridge_cmd, self)
|
||||
|
@ -32,17 +32,17 @@ class CustomStudy(QDialog):
|
||||
f.setupUi(self)
|
||||
self.setWindowModality(Qt.WindowModal)
|
||||
self.setupSignals()
|
||||
f.radio1.click()
|
||||
f.radioNew.click()
|
||||
self.exec_()
|
||||
|
||||
def setupSignals(self):
|
||||
f = self.form
|
||||
f.radio1.clicked.connect(lambda: self.onRadioChange(1))
|
||||
f.radio2.clicked.connect(lambda: self.onRadioChange(2))
|
||||
f.radio3.clicked.connect(lambda: self.onRadioChange(3))
|
||||
f.radio4.clicked.connect(lambda: self.onRadioChange(4))
|
||||
f.radio5.clicked.connect(lambda: self.onRadioChange(5))
|
||||
f.radio6.clicked.connect(lambda: self.onRadioChange(6))
|
||||
f.radioNew.clicked.connect(lambda: self.onRadioChange(RADIO_NEW))
|
||||
f.radioRev.clicked.connect(lambda: self.onRadioChange(RADIO_REV))
|
||||
f.radioForgot.clicked.connect(lambda: self.onRadioChange(RADIO_FORGOT))
|
||||
f.radioAhead.clicked.connect(lambda: self.onRadioChange(RADIO_AHEAD))
|
||||
f.radioPreview.clicked.connect(lambda: self.onRadioChange(RADIO_PREVIEW))
|
||||
f.radioCram.clicked.connect(lambda: self.onRadioChange(RADIO_CRAM))
|
||||
|
||||
def onRadioChange(self, idx):
|
||||
f = self.form
|
||||
|
@ -5,17 +5,27 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from copy import deepcopy
|
||||
from dataclasses import dataclass
|
||||
from typing import Any
|
||||
|
||||
import aqt
|
||||
from anki.errors import DeckRenameError
|
||||
from anki.lang import _, ngettext
|
||||
from anki.rsbackend import StringsGroup
|
||||
from anki.utils import fmtTimeSpan, ids2str
|
||||
from aqt import AnkiQt, gui_hooks
|
||||
from aqt.qt import *
|
||||
from aqt.sound import av_player
|
||||
from aqt.toolbar import BottomBar
|
||||
from aqt.utils import askUser, getOnlyText, openHelp, openLink, shortcut, showWarning
|
||||
from aqt.utils import (
|
||||
askUser,
|
||||
getOnlyText,
|
||||
openHelp,
|
||||
openLink,
|
||||
shortcut,
|
||||
showWarning,
|
||||
tr,
|
||||
)
|
||||
|
||||
|
||||
class DeckBrowserBottomBar:
|
||||
@ -23,6 +33,22 @@ class DeckBrowserBottomBar:
|
||||
self.deck_browser = deck_browser
|
||||
|
||||
|
||||
@dataclass
class DeckBrowserContent:
    """Stores sections of HTML content that the deck browser will be
    populated with.

    Attributes:
        tree {str} -- HTML of the deck tree section
        stats {str} -- HTML of the stats section
        countwarn {str} -- HTML of the deck count warning section
    """

    tree: str
    stats: str
    countwarn: str
|
||||
|
||||
class DeckBrowser:
|
||||
_dueTree: Any
|
||||
|
||||
@ -103,12 +129,17 @@ class DeckBrowser:
|
||||
gui_hooks.deck_browser_did_render(self)
|
||||
|
||||
def __renderPage(self, offset):
|
||||
tree = self._renderDeckTree(self._dueTree)
|
||||
stats = self._renderStats()
|
||||
content = DeckBrowserContent(
|
||||
tree=self._renderDeckTree(self._dueTree),
|
||||
stats=self._renderStats(),
|
||||
countwarn=self._countWarn(),
|
||||
)
|
||||
gui_hooks.deck_browser_will_render_content(self, content)
|
||||
self.web.stdHtml(
|
||||
self._body % dict(tree=tree, stats=stats, countwarn=self._countWarn()),
|
||||
self._body % content.__dict__,
|
||||
css=["deckbrowser.css"],
|
||||
js=["jquery.js", "jquery-ui.js", "deckbrowser.js"],
|
||||
context=self,
|
||||
)
|
||||
self.web.key = "deckBrowser"
|
||||
self._drawButtons()
|
||||
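
A minimal sketch of how DeckBrowserContent feeds the %-style template in __renderPage above; the template string below is a stand-in for the real _body, which uses the same %(key)s names:

    from dataclasses import dataclass

    @dataclass
    class DeckBrowserContent:  # redeclared here so the snippet runs standalone
        tree: str
        stats: str
        countwarn: str

    body = "<div>%(tree)s</div><div>%(stats)s</div><div>%(countwarn)s</div>"
    content = DeckBrowserContent(tree="<ul>...</ul>", stats="<b>10 due</b>", countwarn="")
    html = body % content.__dict__
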
@ -159,7 +190,7 @@ where id > ?""",
|
||||
<tr><th colspan=5 align=left>%s</th><th class=count>%s</th>
|
||||
<th class=count>%s</th><th class=optscol></th></tr>""" % (
|
||||
_("Deck"),
|
||||
_("Due"),
|
||||
tr(StringsGroup.STATISTICS, "due-count"),
|
||||
_("New"),
|
||||
)
|
||||
buf += self._topLevelDragRow()
|
||||
@ -340,9 +371,10 @@ where id > ?""",
|
||||
<button title='%s' onclick='pycmd(\"%s\");'>%s</button>""" % tuple(
|
||||
b
|
||||
)
|
||||
self.bottom.draw(buf)
|
||||
self.bottom.web.set_bridge_command(
|
||||
self._linkHandler, DeckBrowserBottomBar(self)
|
||||
self.bottom.draw(
|
||||
buf=buf,
|
||||
link_handler=self._linkHandler,
|
||||
web_context=DeckBrowserBottomBar(self),
|
||||
)
|
||||
|
||||
def _onShared(self):
|
||||
|
@ -95,7 +95,6 @@ class Editor:
|
||||
|
||||
def setupWeb(self) -> None:
|
||||
self.web = EditorWebView(self.widget, self)
|
||||
self.web.title = "editor"
|
||||
self.web.allowDrops = True
|
||||
self.web.set_bridge_command(self.onBridgeCmd, self)
|
||||
self.outerLayout.addWidget(self.web, 1)
|
||||
@ -167,6 +166,7 @@ class Editor:
|
||||
_html % (bgcol, bgcol, topbuts, _("Show Duplicates")),
|
||||
css=["editor.css"],
|
||||
js=["jquery.js", "editor.js"],
|
||||
context=self,
|
||||
)
|
||||
|
||||
# Top buttons
|
||||
@ -793,8 +793,11 @@ to a cloze type first, via Edit>Change Note Type."""
|
||||
self.mw.progress.finish()
|
||||
# strip off any query string
|
||||
url = re.sub(r"\?.*?$", "", url)
|
||||
path = urllib.parse.unquote(url)
|
||||
return self.mw.col.media.writeData(path, filecontents, typeHint=ct)
|
||||
fname = os.path.basename(urllib.parse.unquote(url))
|
||||
if ct:
|
||||
fname = self.mw.col.media.add_extension_based_on_mime(fname, ct)
|
||||
|
||||
return self.mw.col.media.write_data(fname, filecontents)
|
||||
|
||||
# Paste/drag&drop
|
||||
######################################################################
|
||||
@ -937,7 +940,7 @@ to a cloze type first, via Edit>Change Note Type."""
|
||||
|
||||
class EditorWebView(AnkiWebView):
|
||||
def __init__(self, parent, editor):
|
||||
AnkiWebView.__init__(self)
|
||||
AnkiWebView.__init__(self, title="editor")
|
||||
self.editor = editor
|
||||
self.strip = self.editor.mw.pm.profile["stripHTML"]
|
||||
self.setAcceptDrops(True)
|
||||
|
@ -7,7 +7,7 @@ See pylib/anki/hooks.py
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Callable, Dict, List, Tuple
|
||||
from typing import Any, Callable, Dict, List, Optional, Tuple
|
||||
|
||||
import anki
|
||||
import aqt
|
||||
@ -203,6 +203,105 @@ class _BrowserMenusDidInitHook:
|
||||
browser_menus_did_init = _BrowserMenusDidInitHook()
|
||||
|
||||
|
||||
class _BrowserWillBuildTreeFilter:
|
||||
"""Used to add or replace items in the browser sidebar tree
|
||||
|
||||
'tree' is the root SidebarItem that all other items are added to.
|
||||
|
||||
'stage' is an enum describing the different construction stages of
|
||||
the sidebar tree at which you can interject your changes.
|
||||
The different values can be inspected by looking at
|
||||
aqt.browser.SidebarStage.
|
||||
|
||||
If you want Anki to proceed with the construction of the tree stage
|
||||
in question after you have performed your changes or additions,
|
||||
return the 'handled' boolean unchanged.
|
||||
|
||||
On the other hand, if you want to prevent Anki from adding its own
|
||||
items at a particular construction stage (e.g. in case your add-on
|
||||
implements its own version of that particular stage), return 'True'.
|
||||
|
||||
If you return 'True' at SidebarStage.ROOT, the sidebar will not be
|
||||
populated by any of the other construction stages. For any other stage
|
||||
the tree construction will just continue as usual.
|
||||
|
||||
For example, if your code wishes to replace the tag tree, you could do:
|
||||
|
||||
def on_browser_will_build_tree(handled, root, stage, browser):
|
||||
if stage != SidebarStage.TAGS:
|
||||
# not at tag tree building stage, pass on
|
||||
return handled
|
||||
|
||||
# your tag tree construction code
|
||||
# root.addChild(...)
|
||||
|
||||
# your code handled tag tree construction, no need for Anki
|
||||
# or other add-ons to build the tag tree
|
||||
return True
|
||||
"""
|
||||
|
||||
_hooks: List[
|
||||
Callable[
|
||||
[
|
||||
bool,
|
||||
"aqt.browser.SidebarItem",
|
||||
"aqt.browser.SidebarStage",
|
||||
"aqt.browser.Browser",
|
||||
],
|
||||
bool,
|
||||
]
|
||||
] = []
|
||||
|
||||
def append(
|
||||
self,
|
||||
cb: Callable[
|
||||
[
|
||||
bool,
|
||||
"aqt.browser.SidebarItem",
|
||||
"aqt.browser.SidebarStage",
|
||||
"aqt.browser.Browser",
|
||||
],
|
||||
bool,
|
||||
],
|
||||
) -> None:
|
||||
"""(handled: bool, tree: aqt.browser.SidebarItem, stage: aqt.browser.SidebarStage, browser: aqt.browser.Browser)"""
|
||||
self._hooks.append(cb)
|
||||
|
||||
def remove(
|
||||
self,
|
||||
cb: Callable[
|
||||
[
|
||||
bool,
|
||||
"aqt.browser.SidebarItem",
|
||||
"aqt.browser.SidebarStage",
|
||||
"aqt.browser.Browser",
|
||||
],
|
||||
bool,
|
||||
],
|
||||
) -> None:
|
||||
if cb in self._hooks:
|
||||
self._hooks.remove(cb)
|
||||
|
||||
def __call__(
|
||||
self,
|
||||
handled: bool,
|
||||
tree: aqt.browser.SidebarItem,
|
||||
stage: aqt.browser.SidebarStage,
|
||||
browser: aqt.browser.Browser,
|
||||
) -> bool:
|
||||
for filter in self._hooks:
|
||||
try:
|
||||
handled = filter(handled, tree, stage, browser)
|
||||
except:
|
||||
# if the hook fails, remove it
|
||||
self._hooks.remove(filter)
|
||||
raise
|
||||
return handled
|
||||
|
||||
|
||||
browser_will_build_tree = _BrowserWillBuildTreeFilter()
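
Wiring up the docstring's example, an add-on would register the filter roughly like this (a sketch; the handler body is the skeleton from the docstring above):

    from aqt import gui_hooks
    from aqt.browser import SidebarStage

    def on_browser_will_build_tree(handled, root, stage, browser):
        if stage != SidebarStage.TAGS:
            return handled
        # custom tag tree construction under `root` would go here
        return True

    gui_hooks.browser_will_build_tree.append(on_browser_will_build_tree)
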
|
||||
|
||||
|
||||
class _BrowserWillShowContextMenuHook:
|
||||
_hooks: List[Callable[["aqt.browser.Browser", QMenu], None]] = []
|
||||
|
||||
@ -336,6 +435,63 @@ class _DeckBrowserDidRenderHook:
|
||||
deck_browser_did_render = _DeckBrowserDidRenderHook()
|
||||
|
||||
|
||||
class _DeckBrowserWillRenderContentHook:
|
||||
"""Used to modify HTML content sections in the deck browser body
|
||||
|
||||
'content' contains the sections of HTML content the deck browser body
|
||||
will be updated with.
|
||||
|
||||
When modifying the content of a particular section, please make sure your
|
||||
changes only perform the minimum required edits to make your add-on work.
|
||||
You should avoid overwriting or interfering with existing data as much
|
||||
as possible, instead opting to append your own changes, e.g.:
|
||||
|
||||
def on_deck_browser_will_render_content(deck_browser, content):
|
||||
    content.stats += "\n<div>my html</div>"
|
||||
"""
|
||||
|
||||
_hooks: List[
|
||||
Callable[
|
||||
["aqt.deckbrowser.DeckBrowser", "aqt.deckbrowser.DeckBrowserContent"], None
|
||||
]
|
||||
] = []
|
||||
|
||||
def append(
|
||||
self,
|
||||
cb: Callable[
|
||||
["aqt.deckbrowser.DeckBrowser", "aqt.deckbrowser.DeckBrowserContent"], None
|
||||
],
|
||||
) -> None:
|
||||
"""(deck_browser: aqt.deckbrowser.DeckBrowser, content: aqt.deckbrowser.DeckBrowserContent)"""
|
||||
self._hooks.append(cb)
|
||||
|
||||
def remove(
|
||||
self,
|
||||
cb: Callable[
|
||||
["aqt.deckbrowser.DeckBrowser", "aqt.deckbrowser.DeckBrowserContent"], None
|
||||
],
|
||||
) -> None:
|
||||
if cb in self._hooks:
|
||||
self._hooks.remove(cb)
|
||||
|
||||
def __call__(
|
||||
self,
|
||||
deck_browser: aqt.deckbrowser.DeckBrowser,
|
||||
content: aqt.deckbrowser.DeckBrowserContent,
|
||||
) -> None:
|
||||
for hook in self._hooks:
|
||||
try:
|
||||
hook(deck_browser, content)
|
||||
except:
|
||||
# if the hook fails, remove it
|
||||
self._hooks.remove(hook)
|
||||
raise
|
||||
|
||||
|
||||
deck_browser_will_render_content = _DeckBrowserWillRenderContentHook()
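
A minimal registration sketch for this hook; the countwarn addition is an assumed example:

    from aqt import gui_hooks

    def on_deck_browser_will_render_content(deck_browser, content):
        # append rather than overwrite, as recommended above
        content.countwarn += "\n<div>my add-on's warning</div>"

    gui_hooks.deck_browser_will_render_content.append(on_deck_browser_will_render_content)
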
|
||||
|
||||
|
||||
class _DeckBrowserWillShowOptionsMenuHook:
|
||||
_hooks: List[Callable[[QMenu, int], None]] = []
|
||||
|
||||
@ -598,6 +754,54 @@ class _EditorWillUseFontForFieldFilter:
|
||||
editor_will_use_font_for_field = _EditorWillUseFontForFieldFilter()
|
||||
|
||||
|
||||
class _MediaSyncDidProgressHook:
|
||||
_hooks: List[Callable[["aqt.mediasync.LogEntryWithTime"], None]] = []
|
||||
|
||||
def append(self, cb: Callable[["aqt.mediasync.LogEntryWithTime"], None]) -> None:
|
||||
"""(entry: aqt.mediasync.LogEntryWithTime)"""
|
||||
self._hooks.append(cb)
|
||||
|
||||
def remove(self, cb: Callable[["aqt.mediasync.LogEntryWithTime"], None]) -> None:
|
||||
if cb in self._hooks:
|
||||
self._hooks.remove(cb)
|
||||
|
||||
def __call__(self, entry: aqt.mediasync.LogEntryWithTime) -> None:
|
||||
for hook in self._hooks:
|
||||
try:
|
||||
hook(entry)
|
||||
except:
|
||||
# if the hook fails, remove it
|
||||
self._hooks.remove(hook)
|
||||
raise
|
||||
|
||||
|
||||
media_sync_did_progress = _MediaSyncDidProgressHook()
|
||||
|
||||
|
||||
class _MediaSyncDidStartOrStopHook:
|
||||
_hooks: List[Callable[[bool], None]] = []
|
||||
|
||||
def append(self, cb: Callable[[bool], None]) -> None:
|
||||
"""(running: bool)"""
|
||||
self._hooks.append(cb)
|
||||
|
||||
def remove(self, cb: Callable[[bool], None]) -> None:
|
||||
if cb in self._hooks:
|
||||
self._hooks.remove(cb)
|
||||
|
||||
def __call__(self, running: bool) -> None:
|
||||
for hook in self._hooks:
|
||||
try:
|
||||
hook(running)
|
||||
except:
|
||||
# if the hook fails, remove it
|
||||
self._hooks.remove(hook)
|
||||
raise
|
||||
|
||||
|
||||
media_sync_did_start_or_stop = _MediaSyncDidStartOrStopHook()
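
A sketch of how an add-on might consume the media-sync state hook; the window-title update is an assumed example:

    import aqt
    from aqt import gui_hooks

    def on_media_sync_changed(running: bool) -> None:
        aqt.mw.setWindowTitle("Anki - media syncing..." if running else "Anki")

    gui_hooks.media_sync_did_start_or_stop.append(on_media_sync_changed)
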
|
||||
|
||||
|
||||
class _OverviewDidRefreshHook:
|
||||
"""Allow to update the overview window. E.g. add the deck name in the
|
||||
title."""
|
||||
@ -625,6 +829,55 @@ class _OverviewDidRefreshHook:
|
||||
overview_did_refresh = _OverviewDidRefreshHook()
|
||||
|
||||
|
||||
class _OverviewWillRenderContentHook:
|
||||
"""Used to modify HTML content sections in the overview body
|
||||
|
||||
'content' contains the sections of HTML content the overview body
|
||||
will be updated with.
|
||||
|
||||
When modifying the content of a particular section, please make sure your
|
||||
changes only perform the minimum required edits to make your add-on work.
|
||||
You should avoid overwriting or interfering with existing data as much
|
||||
as possible, instead opting to append your own changes, e.g.:
|
||||
|
||||
def on_overview_will_render_content(overview, content):
|
||||
    content.table += "\n<div>my html</div>"
|
||||
"""
|
||||
|
||||
_hooks: List[
|
||||
Callable[["aqt.overview.Overview", "aqt.overview.OverviewContent"], None]
|
||||
] = []
|
||||
|
||||
def append(
|
||||
self,
|
||||
cb: Callable[["aqt.overview.Overview", "aqt.overview.OverviewContent"], None],
|
||||
) -> None:
|
||||
"""(overview: aqt.overview.Overview, content: aqt.overview.OverviewContent)"""
|
||||
self._hooks.append(cb)
|
||||
|
||||
def remove(
|
||||
self,
|
||||
cb: Callable[["aqt.overview.Overview", "aqt.overview.OverviewContent"], None],
|
||||
) -> None:
|
||||
if cb in self._hooks:
|
||||
self._hooks.remove(cb)
|
||||
|
||||
def __call__(
|
||||
self, overview: aqt.overview.Overview, content: aqt.overview.OverviewContent
|
||||
) -> None:
|
||||
for hook in self._hooks:
|
||||
try:
|
||||
hook(overview, content)
|
||||
except:
|
||||
# if the hook fails, remove it
|
||||
self._hooks.remove(hook)
|
||||
raise
|
||||
|
||||
|
||||
overview_will_render_content = _OverviewWillRenderContentHook()
|
||||
|
||||
|
||||
class _ProfileDidOpenHook:
|
||||
_hooks: List[Callable[[], None]] = []
|
||||
|
||||
@ -1161,6 +1414,67 @@ class _WebviewDidReceiveJsMessageFilter:
|
||||
webview_did_receive_js_message = _WebviewDidReceiveJsMessageFilter()
|
||||
|
||||
|
||||
class _WebviewWillSetContentHook:
|
||||
"""Used to modify web content before it is rendered.
|
||||
|
||||
Web_content contains the HTML, JS, and CSS the web view will be
|
||||
populated with.
|
||||
|
||||
Context is the instance that was passed to stdHtml().
|
||||
It can be inspected to check which screen this hook is firing
|
||||
in, and to get a reference to the screen. For example, if your
|
||||
code wishes to function only in the review screen, you could do:
|
||||
|
||||
def on_webview_will_set_content(web_content: WebContent, context):
|
||||
|
||||
if not isinstance(context, aqt.reviewer.Reviewer):
|
||||
# not reviewer, do not modify content
|
||||
return
|
||||
|
||||
# reviewer, perform changes to content
|
||||
|
||||
context: aqt.reviewer.Reviewer
|
||||
|
||||
addon_package = mw.addonManager.addonFromModule(__name__)
|
||||
|
||||
web_content.css.append(
|
||||
f"/_addons/{addon_package}/web/my-addon.css")
|
||||
web_content.js.append(
|
||||
f"/_addons/{addon_package}/web/my-addon.js")
|
||||
|
||||
web_content.head += "<script>console.log('my-addon')</script>"
|
||||
web_content.body += "<div id='my-addon'></div>"
|
||||
"""
|
||||
|
||||
_hooks: List[Callable[["aqt.webview.WebContent", Optional[Any]], None]] = []
|
||||
|
||||
def append(
|
||||
self, cb: Callable[["aqt.webview.WebContent", Optional[Any]], None]
|
||||
) -> None:
|
||||
"""(web_content: aqt.webview.WebContent, context: Optional[Any])"""
|
||||
self._hooks.append(cb)
|
||||
|
||||
def remove(
|
||||
self, cb: Callable[["aqt.webview.WebContent", Optional[Any]], None]
|
||||
) -> None:
|
||||
if cb in self._hooks:
|
||||
self._hooks.remove(cb)
|
||||
|
||||
def __call__(
|
||||
self, web_content: aqt.webview.WebContent, context: Optional[Any]
|
||||
) -> None:
|
||||
for hook in self._hooks:
|
||||
try:
|
||||
hook(web_content, context)
|
||||
except:
|
||||
# if the hook fails, remove it
|
||||
self._hooks.remove(hook)
|
||||
raise
|
||||
|
||||
|
||||
webview_will_set_content = _WebviewWillSetContentHook()
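
A registration sketch targeting a different context than the docstring's reviewer example; the injected div is an assumption:

    from aqt import gui_hooks
    from aqt.deckbrowser import DeckBrowser

    def on_webview_will_set_content(web_content, context) -> None:
        if not isinstance(context, DeckBrowser):
            return
        web_content.body += "<div id='my-addon-footer'></div>"

    gui_hooks.webview_will_set_content.append(on_webview_will_set_content)
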
|
||||
|
||||
|
||||
class _WebviewWillShowContextMenuHook:
|
||||
_hooks: List[Callable[["aqt.webview.AnkiWebView", QMenu], None]] = []
|
||||
|
||||
|
154
qt/aqt/main.py
@ -15,8 +15,6 @@ from argparse import Namespace
|
||||
from threading import Thread
|
||||
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple
|
||||
|
||||
from send2trash import send2trash
|
||||
|
||||
import anki
|
||||
import aqt
|
||||
import aqt.mediasrv
|
||||
@ -36,6 +34,8 @@ from anki.utils import devMode, ids2str, intTime, isMac, isWin, splitFields
|
||||
from aqt import gui_hooks
|
||||
from aqt.addons import DownloadLogEntry, check_and_prompt_for_updates, show_log_to_user
|
||||
from aqt.legacy import install_pylib_legacy
|
||||
from aqt.mediacheck import check_media_db
|
||||
from aqt.mediasync import MediaSyncer
|
||||
from aqt.profiles import ProfileManager as ProfileManagerType
|
||||
from aqt.qt import *
|
||||
from aqt.qt import sip
|
||||
@ -83,6 +83,7 @@ class AnkiQt(QMainWindow):
|
||||
self.opts = opts
|
||||
self.col: Optional[_Collection] = None
|
||||
self.taskman = TaskManager()
|
||||
self.media_syncer = MediaSyncer(self)
|
||||
aqt.mw = self
|
||||
self.app = app
|
||||
self.pm = profileManager
|
||||
@ -132,7 +133,7 @@ class AnkiQt(QMainWindow):
|
||||
self.setupSignals()
|
||||
self.setupAutoUpdate()
|
||||
self.setupHooks()
|
||||
self.setupRefreshTimer()
|
||||
self.setup_timers()
|
||||
self.updateTitleBar()
|
||||
# screens
|
||||
self.setupDeckBrowser()
|
||||
@ -342,6 +343,8 @@ close the profile or restart Anki."""
|
||||
if not self.loadCollection():
|
||||
return
|
||||
|
||||
self.maybe_auto_sync_media()
|
||||
|
||||
self.pm.apply_profile_options()
|
||||
|
||||
# show main window
|
||||
@ -371,6 +374,9 @@ close the profile or restart Anki."""
|
||||
self._unloadProfile()
|
||||
onsuccess()
|
||||
|
||||
# start media sync if not already running
|
||||
self.maybe_auto_sync_media()
|
||||
|
||||
gui_hooks.profile_will_close()
|
||||
self.unloadCollection(callback)
|
||||
|
||||
@ -385,7 +391,7 @@ close the profile or restart Anki."""
|
||||
# at this point there should be no windows left
|
||||
self._checkForUnclosedWidgets()
|
||||
|
||||
self.maybeAutoSync()
|
||||
self.maybeAutoSync(True)
|
||||
|
||||
def _checkForUnclosedWidgets(self) -> None:
|
||||
for w in self.app.topLevelWidgets():
|
||||
@ -663,9 +669,8 @@ from the profile screen."
|
||||
if self.resetModal:
|
||||
# we don't have to change the webview, as we have a covering window
|
||||
return
|
||||
self.web.set_bridge_command(
|
||||
lambda url: self.delayedMaybeReset(), ResetRequired(self)
|
||||
)
|
||||
web_context = ResetRequired(self)
|
||||
self.web.set_bridge_command(lambda url: self.delayedMaybeReset(), web_context)
|
||||
i = _("Waiting for editing to finish.")
|
||||
b = self.button("refresh", _("Resume Now"), id="resume")
|
||||
self.web.stdHtml(
|
||||
@ -676,7 +681,8 @@ from the profile screen."
|
||||
%s</div></div></center>
|
||||
<script>$('#resume').focus()</script>
|
||||
"""
|
||||
% (i, b)
|
||||
% (i, b),
|
||||
context=web_context,
|
||||
)
|
||||
self.bottomWeb.hide()
|
||||
self.web.setFocus()
|
||||
@ -717,19 +723,16 @@ title="%s" %s>%s</button>""" % (
|
||||
self.form = aqt.forms.main.Ui_MainWindow()
|
||||
self.form.setupUi(self)
|
||||
# toolbar
|
||||
tweb = self.toolbarWeb = aqt.webview.AnkiWebView()
|
||||
tweb.title = "top toolbar"
|
||||
tweb = self.toolbarWeb = aqt.webview.AnkiWebView(title="top toolbar")
|
||||
tweb.setFocusPolicy(Qt.WheelFocus)
|
||||
self.toolbar = aqt.toolbar.Toolbar(self, tweb)
|
||||
self.toolbar.draw()
|
||||
# main area
|
||||
self.web = aqt.webview.AnkiWebView()
|
||||
self.web.title = "main webview"
|
||||
self.web = aqt.webview.AnkiWebView(title="main webview")
|
||||
self.web.setFocusPolicy(Qt.WheelFocus)
|
||||
self.web.setMinimumWidth(400)
|
||||
# bottom area
|
||||
sweb = self.bottomWeb = aqt.webview.AnkiWebView()
|
||||
sweb.title = "bottom toolbar"
|
||||
sweb = self.bottomWeb = aqt.webview.AnkiWebView(title="bottom toolbar")
|
||||
sweb.setFocusPolicy(Qt.WheelFocus)
|
||||
# add in a layout
|
||||
self.mainLayout = QVBoxLayout()
|
||||
@ -833,15 +836,19 @@ title="%s" %s>%s</button>""" % (
|
||||
# expects a current profile and a loaded collection; reloads
|
||||
# collection after sync completes
|
||||
def onSync(self):
|
||||
self.unloadCollection(self._onSync)
|
||||
if self.media_syncer.is_syncing():
|
||||
self.media_syncer.show_sync_log()
|
||||
else:
|
||||
self.unloadCollection(self._onSync)
|
||||
|
||||
def _onSync(self):
|
||||
self._sync()
|
||||
if not self.loadCollection():
|
||||
return
|
||||
self.media_syncer.start()
|
||||
|
||||
# expects a current profile, but no collection loaded
|
||||
def maybeAutoSync(self) -> None:
|
||||
def maybeAutoSync(self, closing=False) -> None:
|
||||
if (
|
||||
not self.pm.profile["syncKey"]
|
||||
or not self.pm.profile["autoSync"]
|
||||
@ -853,6 +860,15 @@ title="%s" %s>%s</button>""" % (
|
||||
# ok to sync
|
||||
self._sync()
|
||||
|
||||
# if media still syncing at this point, pop up progress diag
|
||||
if closing:
|
||||
self.media_syncer.show_diag_until_finished()
|
||||
|
||||
def maybe_auto_sync_media(self) -> None:
|
||||
if not self.pm.profile["autoSync"] or self.safeMode or self.restoringBackup:
|
||||
return
|
||||
self.media_syncer.start()
|
||||
|
||||
def _sync(self):
|
||||
from aqt.sync import SyncManager
|
||||
|
||||
@ -1098,7 +1114,7 @@ title="%s" %s>%s</button>""" % (
|
||||
if qtminor < 11:
|
||||
m.actionUndo.setShortcut(QKeySequence("Ctrl+Alt+Z"))
|
||||
m.actionFullDatabaseCheck.triggered.connect(self.onCheckDB)
|
||||
m.actionCheckMediaDatabase.triggered.connect(self.onCheckMediaDB)
|
||||
m.actionCheckMediaDatabase.triggered.connect(self.on_check_media_db)
|
||||
m.actionDocumentation.triggered.connect(self.onDocumentation)
|
||||
m.actionDonate.triggered.connect(self.onDonate)
|
||||
m.actionStudyDeck.triggered.connect(self.onStudyDeck)
|
||||
@ -1153,12 +1169,14 @@ Difference to correct time: %s."""
|
||||
showWarning(warn)
|
||||
self.app.closeAllWindows()
|
||||
|
||||
# Count refreshing
|
||||
# Timers
|
||||
##########################################################################
|
||||
|
||||
def setupRefreshTimer(self) -> None:
|
||||
# every 10 minutes
|
||||
def setup_timers(self) -> None:
|
||||
# refresh decks every 10 minutes
|
||||
self.progress.timer(10 * 60 * 1000, self.onRefreshTimer, True)
|
||||
# check media sync every 5 minutes
|
||||
self.progress.timer(5 * 60 * 1000, self.on_autosync_timer, True)
|
||||
|
||||
def onRefreshTimer(self):
|
||||
if self.state == "deckBrowser":
|
||||
@ -1166,6 +1184,12 @@ Difference to correct time: %s."""
|
||||
elif self.state == "overview":
|
||||
self.overview.refresh()
|
||||
|
||||
def on_autosync_timer(self):
|
||||
elap = self.media_syncer.seconds_since_last_sync()
|
||||
# autosync if 15 minutes have elapsed since last sync
|
||||
if elap > 15 * 60:
|
||||
self.maybe_auto_sync_media()
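
The two timers above use self.progress.timer(ms, callback, repeat); an add-on could schedule its own periodic work the same way (the interval and callback here are assumptions):

    # e.g. from an add-on, once mw is available:
    mw.progress.timer(60 * 1000, lambda: print("periodic add-on check"), True)
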
|
||||
|
||||
# Permanent libanki hooks
|
||||
##########################################################################
|
||||
|
||||
@ -1265,94 +1289,8 @@ will be lost. Continue?"""
|
||||
continue
|
||||
return ret
|
||||
|
||||
def onCheckMediaDB(self):
|
||||
self.progress.start(immediate=True)
|
||||
(nohave, unused, warnings) = self.col.media.check()
|
||||
self.progress.finish()
|
||||
# generate report
|
||||
report = ""
|
||||
if warnings:
|
||||
report += "\n".join(warnings) + "\n"
|
||||
if unused:
|
||||
numberOfUnusedFilesLabel = len(unused)
|
||||
if report:
|
||||
report += "\n\n\n"
|
||||
report += (
|
||||
ngettext(
|
||||
"%d file found in media folder not used by any cards:",
|
||||
"%d files found in media folder not used by any cards:",
|
||||
numberOfUnusedFilesLabel,
|
||||
)
|
||||
% numberOfUnusedFilesLabel
|
||||
)
|
||||
report += "\n" + "\n".join(unused)
|
||||
if nohave:
|
||||
if report:
|
||||
report += "\n\n\n"
|
||||
report += _("Used on cards but missing from media folder:")
|
||||
report += "\n" + "\n".join(nohave)
|
||||
if not report:
|
||||
tooltip(_("No unused or missing files found."))
|
||||
return
|
||||
# show report and offer to delete
|
||||
diag = QDialog(self)
|
||||
diag.setWindowTitle("Anki")
|
||||
layout = QVBoxLayout(diag)
|
||||
diag.setLayout(layout)
|
||||
text = QTextEdit()
|
||||
text.setReadOnly(True)
|
||||
text.setPlainText(report)
|
||||
layout.addWidget(text)
|
||||
box = QDialogButtonBox(QDialogButtonBox.Close)
|
||||
layout.addWidget(box)
|
||||
if unused:
|
||||
b = QPushButton(_("Delete Unused Files"))
|
||||
b.setAutoDefault(False)
|
||||
box.addButton(b, QDialogButtonBox.ActionRole)
|
||||
b.clicked.connect(lambda c, u=unused, d=diag: self.deleteUnused(u, d))
|
||||
|
||||
box.rejected.connect(diag.reject)
|
||||
diag.setMinimumHeight(400)
|
||||
diag.setMinimumWidth(500)
|
||||
restoreGeom(diag, "checkmediadb")
|
||||
diag.exec_()
|
||||
saveGeom(diag, "checkmediadb")
|
||||
|
||||
def deleteUnused(self, unused, diag):
|
||||
if not askUser(_("Delete unused media?")):
|
||||
return
|
||||
mdir = self.col.media.dir()
|
||||
self.progress.start(immediate=True)
|
||||
try:
|
||||
lastProgress = 0
|
||||
for c, f in enumerate(unused):
|
||||
path = os.path.join(mdir, f)
|
||||
if os.path.exists(path):
|
||||
send2trash(path)
|
||||
|
||||
now = time.time()
|
||||
if now - lastProgress >= 0.3:
|
||||
numberOfRemainingFilesToBeDeleted = len(unused) - c
|
||||
lastProgress = now
|
||||
label = (
|
||||
ngettext(
|
||||
"%d file remaining...",
|
||||
"%d files remaining...",
|
||||
numberOfRemainingFilesToBeDeleted,
|
||||
)
|
||||
% numberOfRemainingFilesToBeDeleted
|
||||
)
|
||||
self.progress.update(label)
|
||||
finally:
|
||||
self.progress.finish()
|
||||
# caller must not pass in empty list
|
||||
# pylint: disable=undefined-loop-variable
|
||||
numberOfFilesDeleted = c + 1
|
||||
tooltip(
|
||||
ngettext("Deleted %d file.", "Deleted %d files.", numberOfFilesDeleted)
|
||||
% numberOfFilesDeleted
|
||||
)
|
||||
diag.close()
|
||||
def on_check_media_db(self) -> None:
|
||||
check_media_db(self)
|
||||
|
||||
def onStudyDeck(self):
|
||||
from aqt.studydeck import StudyDeck
|
||||
|
158
qt/aqt/mediacheck.py
Normal file
@ -0,0 +1,158 @@
|
||||
# Copyright: Ankitects Pty Ltd and contributors
|
||||
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import itertools
|
||||
import time
|
||||
from concurrent.futures import Future
|
||||
from typing import Iterable, List, Optional, TypeVar
|
||||
|
||||
import aqt
|
||||
from anki import hooks
|
||||
from anki.rsbackend import (
|
||||
Interrupted,
|
||||
MediaCheckOutput,
|
||||
Progress,
|
||||
ProgressKind,
|
||||
StringsGroup,
|
||||
)
|
||||
from aqt.qt import *
|
||||
from aqt.utils import askUser, restoreGeom, saveGeom, showText, tooltip, tr
|
||||
|
||||
T = TypeVar("T")
|
||||
|
||||
|
||||
def chunked_list(l: Iterable[T], n: int) -> Iterable[List[T]]:
    l = iter(l)
    while True:
        res = list(itertools.islice(l, n))
        if not res:
            return
        yield res
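
For reference, chunked_list splits any iterable into fixed-size chunks, e.g.:

    assert list(chunked_list(range(7), 3)) == [[0, 1, 2], [3, 4, 5], [6]]
    assert list(chunked_list([], 3)) == []
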
|
||||
|
||||
|
||||
def check_media_db(mw: aqt.AnkiQt) -> None:
|
||||
c = MediaChecker(mw)
|
||||
c.check()
|
||||
|
||||
|
||||
class MediaChecker:
|
||||
progress_dialog: Optional[aqt.progress.ProgressDialog]
|
||||
|
||||
def __init__(self, mw: aqt.AnkiQt) -> None:
|
||||
self.mw = mw
|
||||
|
||||
def check(self) -> None:
|
||||
self.progress_dialog = self.mw.progress.start()
|
||||
hooks.bg_thread_progress_callback.append(self._on_progress)
|
||||
self.mw.col.close()
|
||||
self.mw.taskman.run_in_background(self._check, self._on_finished)
|
||||
|
||||
    def _on_progress(self, proceed: bool, progress: Progress) -> bool:
        if progress.kind != ProgressKind.MediaCheck:
            return proceed

        if self.progress_dialog.wantCancel:
            return False

        self.mw.taskman.run_on_main(lambda: self.mw.progress.update(progress.val))
        return True
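
check(), _on_progress() and _on_finished() follow a general pattern: start a progress dialog, run the work via taskman.run_in_background, and marshal UI updates back with run_on_main. A sketch of the same pattern for an add-on task (the media-counting workload and names are assumptions):

    import os
    from concurrent.futures import Future

    def count_media_in_background(mw) -> None:
        def task() -> int:
            # runs off the main thread
            return len(os.listdir(mw.col.media.dir()))

        def on_done(fut: Future) -> None:
            mw.progress.finish()
            mw.taskman.run_on_main(lambda: print("media files:", fut.result()))

        mw.progress.start(immediate=True)
        mw.taskman.run_in_background(task, on_done)
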
|
||||
|
||||
def _check(self) -> MediaCheckOutput:
|
||||
"Run the check on a background thread."
|
||||
return self.mw.col.media.check()
|
||||
|
||||
def _on_finished(self, future: Future) -> None:
|
||||
hooks.bg_thread_progress_callback.remove(self._on_progress)
|
||||
self.mw.progress.finish()
|
||||
self.progress_dialog = None
|
||||
self.mw.col.reopen()
|
||||
|
||||
exc = future.exception()
|
||||
if isinstance(exc, Interrupted):
|
||||
return
|
||||
|
||||
output: MediaCheckOutput = future.result()
|
||||
report = output.report
|
||||
|
||||
# show report and offer to delete
|
||||
diag = QDialog(self.mw)
|
||||
diag.setWindowTitle("Anki")
|
||||
layout = QVBoxLayout(diag)
|
||||
diag.setLayout(layout)
|
||||
text = QTextEdit()
|
||||
text.setReadOnly(True)
|
||||
text.setPlainText(report)
|
||||
layout.addWidget(text)
|
||||
box = QDialogButtonBox(QDialogButtonBox.Close)
|
||||
layout.addWidget(box)
|
||||
|
||||
if output.unused:
|
||||
b = QPushButton(tr(StringsGroup.MEDIA_CHECK, "delete-unused"))
|
||||
b.setAutoDefault(False)
|
||||
box.addButton(b, QDialogButtonBox.RejectRole)
|
||||
b.clicked.connect(lambda c: self._on_trash_files(output.unused)) # type: ignore
|
||||
|
||||
if output.missing:
|
||||
if any(map(lambda x: x.startswith("latex-"), output.missing)):
|
||||
b = QPushButton(tr(StringsGroup.MEDIA_CHECK, "render-latex"))
|
||||
b.setAutoDefault(False)
|
||||
box.addButton(b, QDialogButtonBox.RejectRole)
|
||||
b.clicked.connect(self._on_render_latex) # type: ignore
|
||||
|
||||
box.rejected.connect(diag.reject) # type: ignore
|
||||
diag.setMinimumHeight(400)
|
||||
diag.setMinimumWidth(500)
|
||||
restoreGeom(diag, "checkmediadb")
|
||||
diag.exec_()
|
||||
saveGeom(diag, "checkmediadb")
|
||||
|
||||
def _on_render_latex(self):
|
||||
self.progress_dialog = self.mw.progress.start()
|
||||
try:
|
||||
out = self.mw.col.media.render_all_latex(self._on_render_latex_progress)
|
||||
if self.progress_dialog.wantCancel:
|
||||
return
|
||||
finally:
|
||||
self.mw.progress.finish()
|
||||
self.progress_dialog = None
|
||||
|
||||
if out is not None:
|
||||
nid, err = out
|
||||
browser = aqt.dialogs.open("Browser", self.mw)
|
||||
browser.form.searchEdit.lineEdit().setText("nid:%d" % nid)
|
||||
browser.onSearchActivated()
|
||||
showText(err, type="html")
|
||||
else:
|
||||
tooltip(tr(StringsGroup.MEDIA_CHECK, "all-latex-rendered"))
|
||||
|
||||
def _on_render_latex_progress(self, count: int) -> bool:
|
||||
if self.progress_dialog.wantCancel:
|
||||
return False
|
||||
|
||||
self.mw.progress.update(tr(StringsGroup.MEDIA_CHECK, "checked", count=count))
|
||||
return True
|
||||
|
||||
def _on_trash_files(self, fnames: List[str]):
|
||||
if not askUser(tr(StringsGroup.MEDIA_CHECK, "delete-unused-confirm")):
|
||||
return
|
||||
|
||||
self.progress_dialog = self.mw.progress.start()
|
||||
|
||||
last_progress = time.time()
|
||||
remaining = len(fnames)
|
||||
total = len(fnames)
|
||||
try:
|
||||
for chunk in chunked_list(fnames, 25):
|
||||
self.mw.col.media.trash_files(chunk)
|
||||
remaining -= len(chunk)
|
||||
if time.time() - last_progress >= 0.3:
|
||||
self.mw.progress.update(
|
||||
tr(StringsGroup.MEDIA_CHECK, "files-remaining", count=remaining)
|
||||
)
|
||||
finally:
|
||||
self.mw.progress.finish()
|
||||
self.progress_dialog = None
|
||||
|
||||
tooltip(tr(StringsGroup.MEDIA_CHECK, "delete-unused-complete", count=total))
|
224
qt/aqt/mediasync.py
Normal file
@ -0,0 +1,224 @@
|
||||
# Copyright: Ankitects Pty Ltd and contributors
|
||||
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
from concurrent.futures import Future
|
||||
from dataclasses import dataclass
|
||||
from typing import List, Union
|
||||
|
||||
import aqt
|
||||
from anki import hooks
|
||||
from anki.rsbackend import (
|
||||
Interrupted,
|
||||
MediaSyncProgress,
|
||||
NetworkError,
|
||||
Progress,
|
||||
ProgressKind,
|
||||
StringsGroup,
|
||||
SyncError,
|
||||
)
|
||||
from anki.types import assert_impossible
|
||||
from anki.utils import intTime
|
||||
from aqt import gui_hooks
|
||||
from aqt.qt import QDialog, QDialogButtonBox, QPushButton
|
||||
from aqt.utils import showWarning, tr
|
||||
|
||||
LogEntry = Union[MediaSyncProgress, str]
|
||||
|
||||
|
||||
@dataclass
|
||||
class LogEntryWithTime:
|
||||
time: int
|
||||
entry: LogEntry
|
||||
|
||||
|
||||
class MediaSyncer:
|
||||
def __init__(self, mw: aqt.main.AnkiQt):
|
||||
self.mw = mw
|
||||
self._syncing: bool = False
|
||||
self._log: List[LogEntryWithTime] = []
|
||||
self._want_stop = False
|
||||
hooks.bg_thread_progress_callback.append(self._on_rust_progress)
|
||||
gui_hooks.media_sync_did_start_or_stop.append(self._on_start_stop)
|
||||
|
||||
def _on_rust_progress(self, proceed: bool, progress: Progress) -> bool:
|
||||
if progress.kind != ProgressKind.MediaSync:
|
||||
return proceed
|
||||
|
||||
assert isinstance(progress.val, MediaSyncProgress)
|
||||
self._log_and_notify(progress.val)
|
||||
|
||||
if self._want_stop:
|
||||
return False
|
||||
else:
|
||||
return proceed
|
||||
|
||||
def start(self) -> None:
|
||||
"Start media syncing in the background, if it's not already running."
|
||||
if self._syncing:
|
||||
return
|
||||
|
||||
hkey = self.mw.pm.sync_key()
|
||||
if hkey is None:
|
||||
return
|
||||
|
||||
if not self.mw.pm.media_syncing_enabled():
|
||||
self._log_and_notify(tr(StringsGroup.SYNC, "media-disabled"))
|
||||
return
|
||||
|
||||
self._log_and_notify(tr(StringsGroup.SYNC, "media-starting"))
|
||||
self._syncing = True
|
||||
self._want_stop = False
|
||||
gui_hooks.media_sync_did_start_or_stop(True)
|
||||
|
||||
def run() -> None:
|
||||
self.mw.col.backend.sync_media(hkey, self._endpoint())
|
||||
|
||||
self.mw.taskman.run_in_background(run, self._on_finished)
|
||||
|
||||
    def _endpoint(self) -> str:
        shard = self.mw.pm.sync_shard()
        if shard is not None:
            shard_str = str(shard)
        else:
            shard_str = ""
        return f"https://sync{shard_str}.ankiweb.net/msync/"
|
||||
|
||||
def _log_and_notify(self, entry: LogEntry) -> None:
|
||||
entry_with_time = LogEntryWithTime(time=intTime(), entry=entry)
|
||||
self._log.append(entry_with_time)
|
||||
self.mw.taskman.run_on_main(
|
||||
lambda: gui_hooks.media_sync_did_progress(entry_with_time)
|
||||
)
|
||||
|
||||
def _on_finished(self, future: Future) -> None:
|
||||
self._syncing = False
|
||||
gui_hooks.media_sync_did_start_or_stop(False)
|
||||
|
||||
exc = future.exception()
|
||||
if exc is not None:
|
||||
self._handle_sync_error(exc)
|
||||
else:
|
||||
self._log_and_notify(tr(StringsGroup.SYNC, "media-complete"))
|
||||
|
||||
def _handle_sync_error(self, exc: BaseException):
|
||||
if isinstance(exc, Interrupted):
|
||||
self._log_and_notify(tr(StringsGroup.SYNC, "media-aborted"))
|
||||
return
|
||||
|
||||
self._log_and_notify(tr(StringsGroup.SYNC, "media-failed"))
|
||||
if isinstance(exc, SyncError):
|
||||
showWarning(exc.localized())
|
||||
elif isinstance(exc, NetworkError):
|
||||
msg = exc.localized()
|
||||
msg += "\n\n" + tr(StringsGroup.NETWORK, "details", details=str(exc))
|
||||
else:
|
||||
raise exc
|
||||
|
||||
def entries(self) -> List[LogEntryWithTime]:
|
||||
return self._log
|
||||
|
||||
def abort(self) -> None:
|
||||
if not self.is_syncing():
|
||||
return
|
||||
self._log_and_notify(tr(StringsGroup.SYNC, "media-aborting"))
|
||||
self._want_stop = True
|
||||
|
||||
def is_syncing(self) -> bool:
|
||||
return self._syncing
|
||||
|
||||
def _on_start_stop(self, running: bool):
|
||||
self.mw.toolbar.set_sync_active(running) # type: ignore
|
||||
|
||||
def show_sync_log(self):
|
||||
aqt.dialogs.open("sync_log", self.mw, self)
|
||||
|
||||
def show_diag_until_finished(self):
|
||||
# nothing to do if not syncing
|
||||
if not self.is_syncing():
|
||||
return
|
||||
|
||||
diag: MediaSyncDialog = aqt.dialogs.open("sync_log", self.mw, self, True)
|
||||
diag.exec_()
|
||||
|
||||
def seconds_since_last_sync(self) -> int:
|
||||
if self.is_syncing():
|
||||
return 0
|
||||
|
||||
if self._log:
|
||||
last = self._log[-1].time
|
||||
else:
|
||||
last = 0
|
||||
return intTime() - last
|
||||
|
||||
|
||||
class MediaSyncDialog(QDialog):
|
||||
silentlyClose = True
|
||||
|
||||
def __init__(
|
||||
self, mw: aqt.main.AnkiQt, syncer: MediaSyncer, close_when_done: bool = False
|
||||
) -> None:
|
||||
super().__init__(mw)
|
||||
self.mw = mw
|
||||
self._syncer = syncer
|
||||
self._close_when_done = close_when_done
|
||||
self.form = aqt.forms.synclog.Ui_Dialog()
|
||||
self.form.setupUi(self)
|
||||
self.abort_button = QPushButton(tr(StringsGroup.SYNC, "abort"))
|
||||
self.abort_button.clicked.connect(self._on_abort) # type: ignore
|
||||
self.abort_button.setAutoDefault(False)
|
||||
self.form.buttonBox.addButton(self.abort_button, QDialogButtonBox.ActionRole)
|
||||
|
||||
gui_hooks.media_sync_did_progress.append(self._on_log_entry)
|
||||
gui_hooks.media_sync_did_start_or_stop.append(self._on_start_stop)
|
||||
|
||||
self.form.plainTextEdit.setPlainText(
|
||||
"\n".join(self._entry_to_text(x) for x in syncer.entries())
|
||||
)
|
||||
self.show()
|
||||
|
||||
def reject(self) -> None:
|
||||
if self._close_when_done and self._syncer.is_syncing():
|
||||
# closing while syncing on close starts an abort
|
||||
self._on_abort()
|
||||
return
|
||||
|
||||
aqt.dialogs.markClosed("sync_log")
|
||||
QDialog.reject(self)
|
||||
|
||||
def reopen(self, mw, syncer, close_when_done: bool = False) -> None:
|
||||
self._close_when_done = close_when_done
|
||||
self.show()
|
||||
|
||||
def _on_abort(self, *args) -> None:
|
||||
self._syncer.abort()
|
||||
self.abort_button.setHidden(True)
|
||||
|
||||
def _time_and_text(self, stamp: int, text: str) -> str:
|
||||
asctime = time.asctime(time.localtime(stamp))
|
||||
return f"{asctime}: {text}"
|
||||
|
||||
def _entry_to_text(self, entry: LogEntryWithTime):
|
||||
if isinstance(entry.entry, str):
|
||||
txt = entry.entry
|
||||
elif isinstance(entry.entry, MediaSyncProgress):
|
||||
txt = self._logentry_to_text(entry.entry)
|
||||
else:
|
||||
assert_impossible(entry.entry)
|
||||
return self._time_and_text(entry.time, txt)
|
||||
|
||||
def _logentry_to_text(self, e: MediaSyncProgress) -> str:
|
||||
return f"{e.added}, {e.removed}, {e.checked}"
|
||||
|
||||
def _on_log_entry(self, entry: LogEntryWithTime):
|
||||
self.form.plainTextEdit.appendPlainText(self._entry_to_text(entry))
|
||||
if not self._syncer.is_syncing():
|
||||
self.abort_button.setHidden(True)
|
||||
|
||||
def _on_start_stop(self, running: bool) -> None:
|
||||
if not running and self._close_when_done:
|
||||
aqt.dialogs.markClosed("sync_log")
|
||||
self._close_when_done = False
|
||||
self.close()
|
@ -4,6 +4,8 @@

from __future__ import annotations

from dataclasses import dataclass

import aqt
from anki.lang import _
from aqt import gui_hooks
@ -17,6 +19,24 @@ class OverviewBottomBar:
        self.overview = overview


@dataclass
class OverviewContent:
    """Stores sections of HTML content that the overview will be
    populated with.

    Attributes:
        deck {str} -- Plain text deck name
        shareLink {str} -- HTML of the share link section
        desc {str} -- HTML of the deck description section
        table {str} -- HTML of the deck stats table section
    """

    deck: str
    shareLink: str
    desc: str
    table: str


class Overview:
    "Deck overview."

@ -141,16 +161,18 @@ class Overview:
            shareLink = '<a class=smallLink href="review">Reviews and Updates</a>'
        else:
            shareLink = ""
        content = OverviewContent(
            deck=deck["name"],
            shareLink=shareLink,
            desc=self._desc(deck),
            table=self._table(),
        )
        gui_hooks.overview_will_render_content(self, content)
        self.web.stdHtml(
            self._body
            % dict(
                deck=deck["name"],
                shareLink=shareLink,
                desc=self._desc(deck),
                table=self._table(),
            ),
            self._body % content.__dict__,
            css=["overview.css"],
            js=["jquery.js", "overview.js"],
            context=self,
        )

    def _desc(self, deck):
@ -197,7 +219,7 @@ to their original deck."""
<tr><td align=center valign=top>
<table cellspacing=5>
<tr><td>%s:</td><td><b><span class=new-count>%s</span></b></td></tr>
<tr><td>%s:</td><td><b><font class=learn-count>%s</span></b></td></tr>
<tr><td>%s:</td><td><b><span class=learn-count>%s</span></b></td></tr>
<tr><td>%s:</td><td><b><span class=review-count>%s</span></b></td></tr>
</table>
</td><td align=center>
@ -243,8 +265,9 @@ to their original deck."""
<button title="%s" onclick='pycmd("%s")'>%s</button>""" % tuple(
            b
        )
        self.bottom.draw(buf)
        self.bottom.web.set_bridge_command(self._linkHandler, OverviewBottomBar(self))
        self.bottom.draw(
            buf=buf, link_handler=self._linkHandler, web_context=OverviewBottomBar(self)
        )

    # Studying more
    ######################################################################
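
The OverviewContent dataclass and the gui_hooks.overview_will_render_content call above give add-ons a chance to adjust the overview's HTML sections before they are rendered. A minimal sketch of subscribing to that hook, assuming the registration pattern used by the other gui_hooks; the appended HTML is purely illustrative:

from aqt import gui_hooks


def on_overview_will_render_content(overview, content) -> None:
    # "content" is the OverviewContent instance built in _renderPage();
    # appending keeps the stock stats table intact
    content.table += "<div>extra stats from a hypothetical add-on</div>"


gui_hooks.overview_will_render_content.append(on_overview_will_render_content)
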
@ -22,10 +22,13 @@ import uuid

import PyQt5.QtSvg

from anki.utils import isWin
from anki.utils import isLin, isWin

# external module access in Windows
if isWin:
    import pythoncom
    import win32com
    import pywintypes

if isLin:
    import fcntl
@ -10,7 +10,7 @@ import aqt
from anki.lang import _
from aqt import AnkiQt
from aqt.qt import *
from aqt.utils import askUser, openHelp, showInfo
from aqt.utils import askUser, openHelp, showInfo, showWarning


class Preferences(QDialog):
@ -59,7 +59,7 @@ class Preferences(QDialog):
    def langIdx(self):
        codes = [x[1] for x in anki.lang.langs]
        try:
            return codes.index(anki.lang.getLang())
            return codes.index(anki.lang.currentLang)
        except:
            return codes.index("en_US")

@ -196,9 +196,12 @@ Not currently enabled; click the sync button in the main window to enable."""
            )
        )

    def onSyncDeauth(self):
    def onSyncDeauth(self) -> None:
        if self.mw.media_syncer.is_syncing():
            showWarning("Can't log out while sync in progress.")
            return
        self.prof["syncKey"] = None
        self.mw.col.media.forceResync()
        self.mw.col.media.force_resync()
        self._hideAuth()

    def updateNetwork(self):
@ -11,7 +11,7 @@ import locale
import pickle
import random
import shutil
from typing import Any, Dict
from typing import Any, Dict, Optional

from send2trash import send2trash

@ -442,7 +442,7 @@ please see:
        sql = "update profiles set data = ? where name = ?"
        self.db.execute(sql, self._pickle(self.meta), "_global")
        self.db.commit()
        anki.lang.setLang(code, locale_dir(), local=False)
        anki.lang.set_lang(code, locale_dir())

    # OpenGL
    ######################################################################

@ -502,7 +502,7 @@ please see:
    def set_night_mode(self, on: bool) -> None:
        self.meta["night_mode"] = on

    # Profile-specific options
    # Profile-specific
    ######################################################################

    def interrupt_audio(self) -> bool:
@ -512,6 +512,18 @@ please see:
        self.profile["interrupt_audio"] = val
        aqt.sound.av_player.interrupt_current_audio = val

    def sync_key(self) -> Optional[str]:
        return self.profile.get("syncKey")

    def set_sync_key(self, val: Optional[str]) -> None:
        self.profile["syncKey"] = val

    def media_syncing_enabled(self) -> bool:
        return self.profile["syncMedia"]

    def sync_shard(self) -> Optional[int]:
        return self.profile.get("hostNum")

    ######################################################################

    def apply_profile_options(self) -> None:
@ -2,7 +2,10 @@
# -*- coding: utf-8 -*-
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html

from __future__ import annotations

import time
from typing import Optional

import aqt.forms
from anki.lang import _
@ -82,42 +85,20 @@ class ProgressManager:
    # Creating progress dialogs
    ##########################################################################

    class ProgressDialog(QDialog):
        def __init__(self, parent):
            QDialog.__init__(self, parent)
            self.form = aqt.forms.progress.Ui_Dialog()
            self.form.setupUi(self)
            self._closingDown = False
            self.wantCancel = False

        def cancel(self):
            self._closingDown = True
            self.hide()

        def closeEvent(self, evt):
            if self._closingDown:
                evt.accept()
            else:
                self.wantCancel = True
                evt.ignore()

        def keyPressEvent(self, evt):
            if evt.key() == Qt.Key_Escape:
                evt.ignore()
                self.wantCancel = True

    # note: immediate is no longer used
    def start(self, max=0, min=0, label=None, parent=None, immediate=False):
    def start(
        self, max=0, min=0, label=None, parent=None, immediate=False
    ) -> Optional[ProgressDialog]:
        self._levels += 1
        if self._levels > 1:
            return
            return None
        # setup window
        parent = parent or self.app.activeWindow()
        if not parent and self.mw.isVisible():
            parent = self.mw

        label = label or _("Processing...")
        self._win = self.ProgressDialog(parent)
        self._win = ProgressDialog(parent)
        self._win.form.progressBar.setMinimum(min)
        self._win.form.progressBar.setMaximum(max)
        self._win.form.progressBar.setTextVisible(False)
@ -151,7 +132,7 @@ class ProgressManager:
        self._win.form.progressBar.setValue(self._counter)
        if process and elapsed >= 0.2:
            self._updating = True
            self.app.processEvents(QEventLoop.ExcludeUserInputEvents)
            self.app.processEvents()
            self._updating = False
            self._lastUpdate = time.time()

@ -207,3 +188,28 @@ class ProgressManager:
    def busy(self):
        "True if processing."
        return self._levels


class ProgressDialog(QDialog):
    def __init__(self, parent):
        QDialog.__init__(self, parent)
        self.form = aqt.forms.progress.Ui_Dialog()
        self.form.setupUi(self)
        self._closingDown = False
        self.wantCancel = False

    def cancel(self):
        self._closingDown = True
        self.hide()

    def closeEvent(self, evt):
        if self._closingDown:
            evt.accept()
        else:
            self.wantCancel = True
            evt.ignore()

    def keyPressEvent(self, evt):
        if evt.key() == Qt.Key_Escape:
            evt.ignore()
            self.wantCancel = True
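
With this change ProgressManager.start() returns the ProgressDialog it opened (or None when a progress window is already active), but callers keep the usual start/update/finish pattern. A rough sketch of that pattern, assuming the running AnkiQt instance is available as mw and that update() and finish() behave as elsewhere in the code base; do_chunk_of_work is a hypothetical helper:

from anki.lang import _
from aqt import mw

mw.progress.start(max=100, label=_("Processing..."))
try:
    for i in range(100):
        do_chunk_of_work(i)  # stand-in for real work
        mw.progress.update(value=i)
finally:
    mw.progress.finish()
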
@ -153,6 +153,7 @@ class Reviewer:
                "mathjax/MathJax.js",
                "reviewer.js",
            ],
            context=self,
        )
        # show answer / ease buttons
        self.bottom.web.show()
@ -160,6 +161,7 @@ class Reviewer:
            self._bottomHTML(),
            css=["toolbar-bottom.css", "reviewer-bottom.css"],
            js=["jquery.js", "reviewer-bottom.js"],
            context=ReviewerBottomBar(self),
        )

    # Showing the question
@ -95,7 +95,10 @@ class DeckStats(QDialog):
        stats = self.mw.col.stats()
        stats.wholeCollection = self.wholeCollection
        self.report = stats.report(type=self.period)
        self.form.web.title = "deck stats"
        self.form.web.stdHtml(
            "<html><body>" + self.report + "</body></html>", js=["jquery.js", "plot.js"]
            "<html><body>" + self.report + "</body></html>",
            js=["jquery.js", "plot.js"],
            context=self,
        )
        self.mw.progress.finish()
@ -7,7 +7,7 @@ import time
from anki import hooks
from anki.lang import _
from anki.storage import Collection
from anki.sync import FullSyncer, MediaSyncer, RemoteMediaServer, RemoteServer, Syncer
from anki.sync import FullSyncer, RemoteServer, Syncer
from aqt.qt import *
from aqt.utils import askUserDialog, showInfo, showText, showWarning, tooltip

@ -42,7 +42,6 @@ class SyncManager(QObject):
            self.pm.collectionPath(),
            self.pm.profile["syncKey"],
            auth=auth,
            media=self.pm.profile["syncMedia"],
            hostNum=self.pm.profile.get("hostNum"),
        )
        t._event.connect(self.onEvent)
@ -132,8 +131,6 @@ automatically."""
            m = _("Downloading from AnkiWeb...")
        elif t == "sanity":
            m = _("Checking...")
        elif t == "findMedia":
            m = _("Checking media...")
        elif t == "upgradeRequired":
            showText(
                _(
@ -154,14 +151,6 @@ Please visit AnkiWeb, upgrade your deck, then try again."""
            self._clockOff()
        elif evt == "checkFailed":
            self._checkFailed()
        elif evt == "mediaSanity":
            showWarning(
                _(
                    """\
A problem occurred while syncing media. Please use Tools>Check Media, then \
sync again to correct the issue."""
                )
            )
        elif evt == "noChanges":
            pass
        elif evt == "fullSync":
@ -358,12 +347,11 @@ class SyncThread(QThread):
    _event = pyqtSignal(str, str)
    progress_event = pyqtSignal(int, int)

    def __init__(self, path, hkey, auth=None, media=True, hostNum=None):
    def __init__(self, path, hkey, auth=None, hostNum=None):
        QThread.__init__(self)
        self.path = path
        self.hkey = hkey
        self.auth = auth
        self.media = media
        self.hostNum = hostNum
        self._abort = 0  # 1=flagged, 2=aborting

@ -475,8 +463,6 @@ class SyncThread(QThread):
        self.syncMsg = self.client.syncMsg
        self.uname = self.client.uname
        self.hostNum = self.client.hostNum
        # then move on to media sync
        self._syncMedia()

    def _fullSync(self):
        # tell the calling thread we need a decision on sync direction, and
@ -505,29 +491,6 @@ class SyncThread(QThread):
            if "sync cancelled" in str(e):
                return
            raise
        # reopen db and move on to media sync
        self.col.reopen()
        self._syncMedia()

    def _syncMedia(self):
        if not self.media:
            return
        self.server = RemoteMediaServer(
            self.col, self.hkey, self.server.client, hostNum=self.hostNum
        )
        self.client = MediaSyncer(self.col, self.server)
        try:
            ret = self.client.sync()
        except Exception as e:
            if "sync cancelled" in str(e):
                return
            raise
        if ret == "noChanges":
            self.fireEvent("noMediaChanges")
        elif ret == "sanityCheckFailed" or ret == "corruptMediaDB":
            self.fireEvent("mediaSanity")
        else:
            self.fireEvent("mediaSuccess")

    def fireEvent(self, cmd, arg=""):
        self._event.emit(cmd, arg)
@ -4,7 +4,7 @@

from __future__ import annotations

from typing import Dict
from typing import Any, Dict, Optional

import aqt
from anki.lang import _
@ -40,10 +40,21 @@ class Toolbar:
        self.web.setFixedHeight(30)
        self.web.requiresCol = False

    def draw(self):
        self.web.set_bridge_command(self._linkHandler, TopToolbar(self))
        self.web.stdHtml(self._body % self._centerLinks(), css=["toolbar.css"])
    def draw(
        self,
        buf: str = "",
        web_context: Optional[Any] = None,
        link_handler: Optional[Callable[[str], Any]] = None,
    ):
        web_context = web_context or TopToolbar(self)
        link_handler = link_handler or self._linkHandler
        self.web.set_bridge_command(link_handler, web_context)
        self.web.stdHtml(
            self._body % self._centerLinks(), css=["toolbar.css"], context=web_context,
        )
        self.web.adjustHeightToFit()
        if self.mw.media_syncer.is_syncing():
            self.set_sync_active(True)

    # Available links
    ######################################################################
@ -61,10 +72,9 @@ class Toolbar:
            ("add", _("Add"), _("Shortcut key: %s") % "A"),
            ("browse", _("Browse"), _("Shortcut key: %s") % "B"),
            ("stats", _("Stats"), _("Shortcut key: %s") % "T"),
            ("sync", _("Sync"), _("Shortcut key: %s") % "Y"),
        ]
        gui_hooks.top_toolbar_did_init_links(links, self)
        return self._linkHTML(links)
        return self._linkHTML(links) + self._sync_link()

    def _linkHTML(self, links):
        buf = ""
@ -78,6 +88,22 @@ class Toolbar:
            )
        return buf

    def _sync_link(self) -> str:
        name = _("Sync")
        title = _("Shortcut key: %s") % "Y"
        label = "sync"
        return f"""
<a class=hitem tabindex="-1" aria-label="{name}" title="{title}" href=# onclick="return pycmd('{label}')">{name}
<img id=sync-spinner src='/_anki/imgs/refresh.svg'>
</a>"""

    def set_sync_active(self, active: bool) -> None:
        if active:
            meth = "addClass"
        else:
            meth = "removeClass"
        self.web.eval(f"$('#sync-spinner').{meth}('spin')")

    # Link handling
    ######################################################################

@ -133,10 +159,19 @@ class BottomBar(Toolbar):
%s</td></tr></table></center>
"""

    def draw(self, buf):
    def draw(
        self,
        buf: str = "",
        web_context: Optional[Any] = None,
        link_handler: Optional[Callable[[str], Any]] = None,
    ):
        # note: some screens may override this
        self.web.set_bridge_command(self._linkHandler, BottomToolbar(self))
        web_context = web_context or BottomToolbar(self)
        link_handler = link_handler or self._linkHandler
        self.web.set_bridge_command(link_handler, web_context)
        self.web.stdHtml(
            self._centerBody % buf, css=["toolbar.css", "toolbar-bottom.css"]
            self._centerBody % buf,
            css=["toolbar.css", "toolbar-bottom.css"],
            context=web_context,
        )
        self.web.adjustHeightToFit()
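
The new top_toolbar_did_init_links hook fires after the default link tuples are built, so an add-on can append its own (command, label, tooltip) entry before _linkHTML() turns them into HTML. A minimal sketch with a hypothetical command name; wiring up the resulting pycmd("my_screen") message would still require extending the toolbar's link handler, which is omitted here:

from anki.lang import _
from aqt import gui_hooks


def add_my_toolbar_link(links, toolbar) -> None:
    # entries mirror the (cmd, label, tooltip) tuples built in _centerLinks()
    links.append(("my_screen", _("My Screen"), _("Open a hypothetical add-on screen")))


gui_hooks.top_toolbar_did_init_links.append(add_my_toolbar_link)
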
@ -1,14 +1,18 @@
# Copyright: Ankitects Pty Ltd and contributors
# -*- coding: utf-8 -*-
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html

from __future__ import annotations

import os
import re
import subprocess
import sys
from typing import Any, Optional
from typing import Any, Optional, Union

import aqt
from anki.lang import _
from anki.rsbackend import StringsGroup
from anki.utils import invalidFilename, isMac, isWin, noBundledLibs, versionWithBuild
from aqt.qt import *
from aqt.theme import theme_manager
@ -27,6 +31,15 @@ def locale_dir() -> str:
    return os.path.join(aqt_data_folder(), "locale")


def tr(group: StringsGroup, key: str, **kwargs: Union[str, int, float]) -> str:
    """Shortcut to access translations from the backend.
    (Currently) requires an open collection."""
    if aqt.mw.col:
        return aqt.mw.col.backend.translate(group, key, **kwargs)
    else:
        return key


def openHelp(section):
    link = aqt.appHelpSite
    if section:
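
The tr() helper above simply forwards to the backend's translation catalog when a collection is open, and falls back to returning the key otherwise. A hedged usage sketch; the MEDIA_CHECK group is used only as an example, and the key mirrors the "delete-unused" string referenced by the extract-po-string.py script later in this diff, so treat both as placeholders:

from anki.rsbackend import StringsGroup
from aqt.utils import tr

# returns the localized string when a collection is open, else the key itself
label = tr(StringsGroup.MEDIA_CHECK, "delete-unused")
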
@ -1,6 +1,7 @@
# Copyright: Ankitects Pty Ltd and contributors
# -*- coding: utf-8 -*-
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
import dataclasses
import json
import math
import sys
@ -96,14 +97,76 @@ class AnkiWebPage(QWebEnginePage):  # type: ignore
        return self._onBridgeCmd(str)


# Add-ons
##########################################################################


@dataclasses.dataclass
class WebContent:
    """Stores all dynamically modified content that a particular web view
    will be populated with.

    Attributes:
        body {str} -- HTML body
        head {str} -- HTML head
        css {List[str]} -- List of media server subpaths,
            each pointing to a CSS file
        js {List[str]} -- List of media server subpaths,
            each pointing to a JS file

    Important Notes:
        - When modifying the attributes specified above, please make sure your
          changes only perform the minimum required edits to make your add-on work.
          You should avoid overwriting or interfering with existing data as much
          as possible, instead opting to append your own changes, e.g.:

            def on_webview_will_set_content(web_content: WebContent, context):
                web_content.body += "<my_html>"
                web_content.head += "<my_head>"

        - The paths specified in `css` and `js` need to be accessible by Anki's
          media server. All list members without a specified subpath are assumed
          to be located under `/_anki`, which is the media server subpath used
          for all web assets shipped with Anki.

          Add-ons may expose their own web assets by utilizing
          aqt.addons.AddonManager.setWebExports(). Web exports registered
          in this manner may then be accessed under the `/_addons` subpath.

          E.g., to allow access to a `my-addon.js` and `my-addon.css` residing
          in a "web" subfolder in your add-on package, first register the
          corresponding web export:

            > from aqt import mw
            > mw.addonManager.setWebExports(__name__, r"web/.*(css|js)")

          Then append the subpaths to the corresponding web_content fields
          within a function subscribing to gui_hooks.webview_will_set_content:

            def on_webview_will_set_content(web_content: WebContent, context):
                addon_package = mw.addonManager.addonFromModule(__name__)
                web_content.css.append(
                    f"/_addons/{addon_package}/web/my-addon.css")
                web_content.js.append(
                    f"/_addons/{addon_package}/web/my-addon.js")
    """

    body: str = ""
    head: str = ""
    css: List[str] = dataclasses.field(default_factory=lambda: [])
    js: List[str] = dataclasses.field(default_factory=lambda: [])


# Main web view
##########################################################################


class AnkiWebView(QWebEngineView):  # type: ignore
    def __init__(self, parent: Optional[QWidget] = None) -> None:
    def __init__(
        self, parent: Optional[QWidget] = None, title: str = "default"
    ) -> None:
        QWebEngineView.__init__(self, parent=parent)  # type: ignore
        self.title = "default"
        self.title = title
        self._page = AnkiWebPage(self._onBridgeCmd)
        self._page.setBackgroundColor(self._getWindowColor())  # reduce flicker

@ -254,11 +317,23 @@ class AnkiWebView(QWebEngineView):  # type: ignore
            return QColor("#ececec")
        return self.style().standardPalette().color(QPalette.Window)

    def stdHtml(self, body, css=None, js=None, head=""):
        if css is None:
            css = []
        if js is None:
            js = ["jquery.js"]
    def stdHtml(
        self,
        body: str,
        css: Optional[List[str]] = None,
        js: Optional[List[str]] = None,
        head: str = "",
        context: Optional[Any] = None,
    ):

        web_content = WebContent(
            body=body,
            head=head,
            js=["webview.js"] + (["jquery.js"] if js is None else js),
            css=["webview.css"] + ([] if css is None else css),
        )

        gui_hooks.webview_will_set_content(web_content, context)

        palette = self.style().standardPalette()
        color_hl = palette.color(QPalette.Highlight).name()
@ -299,16 +374,12 @@ div[contenteditable="true"]:focus {
            "color_hl_txt": color_hl_txt,
        }

        csstxt = "\n".join(
            [self.bundledCSS("webview.css")] + [self.bundledCSS(fname) for fname in css]
        )
        jstxt = "\n".join(
            [self.bundledScript("webview.js")]
            + [self.bundledScript(fname) for fname in js]
        )
        csstxt = "\n".join(self.bundledCSS(fname) for fname in web_content.css)
        jstxt = "\n".join(self.bundledScript(fname) for fname in web_content.js)

        from aqt import mw

        head = mw.baseHTML() + head + csstxt + jstxt
        head = mw.baseHTML() + csstxt + jstxt + web_content.head

        body_class = theme_manager.body_class()

@ -334,20 +405,25 @@ body {{ zoom: {}; background: {}; {} }}
            widgetspec,
            head,
            body_class,
            body,
            web_content.body,
        )
        # print(html)
        self.setHtml(html)

    def webBundlePath(self, path):
    def webBundlePath(self, path: str) -> str:
        from aqt import mw

        return "http://127.0.0.1:%d/_anki/%s" % (mw.mediaServer.getPort(), path)
        if path.startswith("/"):
            subpath = ""
        else:
            subpath = "/_anki/"

    def bundledScript(self, fname):
        return f"http://127.0.0.1:{mw.mediaServer.getPort()}{subpath}{path}"

    def bundledScript(self, fname: str) -> str:
        return '<script src="%s"></script>' % self.webBundlePath(fname)

    def bundledCSS(self, fname):
    def bundledCSS(self, fname: str) -> str:
        return '<link rel="stylesheet" type="text/css" href="%s">' % self.webBundlePath(
            fname
        )
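
The WebContent docstring above shows how to append CSS/JS subpaths, but not how the function is registered or how to limit it to one screen. A small sketch, assuming the usual gui_hooks registration and using the deck browser purely as an example context:

from aqt import gui_hooks, mw
from aqt.deckbrowser import DeckBrowser


def on_webview_will_set_content(web_content, context) -> None:
    # only touch the deck browser's web view; other screens pass different contexts
    if not isinstance(context, DeckBrowser):
        return
    addon_package = mw.addonManager.addonFromModule(__name__)
    web_content.js.append(f"/_addons/{addon_package}/web/my-addon.js")


gui_hooks.webview_will_set_content.append(on_webview_will_set_content)
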
1
qt/aqt_data/web/imgs/refresh.svg
Normal file
1
qt/aqt_data/web/imgs/refresh.svg
Normal file
@ -0,0 +1 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"><svg width="100%" height="100%" viewBox="0 0 32 32" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xml:space="preserve" xmlns:serif="http://www.serif.com/" style="fill-rule:evenodd;clip-rule:evenodd;stroke-linejoin:round;stroke-miterlimit:1.41421;"><g id="Layer-1" serif:id="Layer 1"><g><path d="M18.004,13.502l10.741,0l0,-10.83l-10.741,10.83Z" style="fill:#808080;fill-rule:nonzero;stroke:#808080;stroke-width:1px;"/><path d="M24.912,19.779c-1.384,3.394 -4.584,5.486 -8.33,5.486c-5.018,0 -9.093,-4.131 -9.093,-9.149c0,-5.018 4.121,-9.137 9.139,-9.137c2.516,0 4.81,1.026 6.464,2.687l2.604,-3.041c-2.355,-2.296 -5.53,-3.716 -9.079,-3.716c-7.216,0 -13.048,5.85 -13.048,13.066c0,7.216 5.469,13.116 12.685,13.116c5.929,0 10.671,-4.221 12.177,-9.312l-3.519,0Z" style="fill:#808080;fill-rule:nonzero;stroke:#808080;stroke-width:1px;stroke-linejoin:miter;stroke-miterlimit:10;"/></g></g></svg>
@ -17,42 +17,42 @@
<item>
<layout class="QGridLayout" name="gridLayout">
<item row="3" column="0">
<widget class="QRadioButton" name="radio4">
<widget class="QRadioButton" name="radioAhead">
<property name="text">
<string>Review ahead</string>
</property>
</widget>
</item>
<item row="2" column="0">
<widget class="QRadioButton" name="radio3">
<widget class="QRadioButton" name="radioForgot">
<property name="text">
<string>Review forgotten cards</string>
</property>
</widget>
</item>
<item row="0" column="0">
<widget class="QRadioButton" name="radio1">
<widget class="QRadioButton" name="radioNew">
<property name="text">
<string>Increase today's new card limit</string>
</property>
</widget>
</item>
<item row="1" column="0">
<widget class="QRadioButton" name="radio2">
<widget class="QRadioButton" name="radioRev">
<property name="text">
<string>Increase today's review card limit</string>
</property>
</widget>
</item>
<item row="5" column="0">
<widget class="QRadioButton" name="radio6">
<widget class="QRadioButton" name="radioCram">
<property name="text">
<string>Study by card state or tag</string>
</property>
</widget>
</item>
<item row="4" column="0">
<widget class="QRadioButton" name="radio5">
<widget class="QRadioButton" name="radioPreview">
<property name="text">
<string>Preview new cards</string>
</property>
@ -163,11 +163,12 @@
</layout>
</widget>
<tabstops>
<tabstop>radio1</tabstop>
<tabstop>radio2</tabstop>
<tabstop>radio3</tabstop>
<tabstop>radio4</tabstop>
<tabstop>radio6</tabstop>
<tabstop>radioNew</tabstop>
<tabstop>radioRev</tabstop>
<tabstop>radioForgot</tabstop>
<tabstop>radioAhead</tabstop>
<tabstop>radioPreview</tabstop>
<tabstop>radioCram</tabstop>
<tabstop>spin</tabstop>
<tabstop>buttonBox</tabstop>
</tabstops>
74
qt/designer/synclog.ui
Normal file
74
qt/designer/synclog.ui
Normal file
@ -0,0 +1,74 @@
<?xml version="1.0" encoding="UTF-8"?>
<ui version="4.0">
<class>Dialog</class>
<widget class="QDialog" name="Dialog">
<property name="geometry">
<rect>
<x>0</x>
<y>0</y>
<width>557</width>
<height>295</height>
</rect>
</property>
<property name="windowTitle">
<string>Sync</string>
</property>
<layout class="QVBoxLayout" name="verticalLayout">
<item>
<widget class="QPlainTextEdit" name="plainTextEdit">
<property name="readOnly">
<bool>true</bool>
</property>
<property name="plainText">
<string notr="true"/>
</property>
</widget>
</item>
<item>
<widget class="QDialogButtonBox" name="buttonBox">
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
<property name="standardButtons">
<set>QDialogButtonBox::Close</set>
</property>
</widget>
</item>
</layout>
</widget>
<resources/>
<connections>
<connection>
<sender>buttonBox</sender>
<signal>accepted()</signal>
<receiver>Dialog</receiver>
<slot>accept()</slot>
<hints>
<hint type="sourcelabel">
<x>248</x>
<y>254</y>
</hint>
<hint type="destinationlabel">
<x>157</x>
<y>274</y>
</hint>
</hints>
</connection>
<connection>
<sender>buttonBox</sender>
<signal>rejected()</signal>
<receiver>Dialog</receiver>
<slot>reject()</slot>
<hints>
<hint type="sourcelabel">
<x>316</x>
<y>260</y>
</hint>
<hint type="destinationlabel">
<x>286</x>
<y>274</y>
</hint>
</hints>
</connection>
</connections>
</ui>
2
qt/i18n/.gitignore
vendored
2
qt/i18n/.gitignore
vendored
@ -1 +1,3 @@
.build
po
ftl
@ -3,14 +3,14 @@
# build mo files
#

targetDir="../aqt_data/locale"
targetDir="../aqt_data/locale/gettext"
mkdir -p $targetDir

echo "Compiling *.po..."
for file in translations/anki.pot/*
for file in po/desktop/*/anki.po
do
    outdir=$(echo $file | \
        perl -pe "s%translations/anki.pot/(.*)%$targetDir/\1/LC_MESSAGES%")
        perl -pe "s%po/desktop/(.*)/anki.po%$targetDir/\1/LC_MESSAGES%")
    outfile="$outdir/anki.mo"
    mkdir -p $outdir
    msgfmt $file --output-file=$outfile
@ -4,7 +4,7 @@
#

import os, re, sys
po_dir = "translations/anki.pot"
po_dir = "po/desktop"

msg_re = re.compile(r"^(msgid|msgid_plural|msgstr|)(\[[\d]\])? \"(.*)\"$")
cont_re = re.compile(r"^\"(.*)\"$")
@ -100,8 +100,11 @@ def fix_po(path):
    return len(problems)

problems = 0
for po in os.listdir(po_dir):
    path = os.path.join(po_dir, po)
for fname in os.listdir(po_dir):
    path = os.path.join(po_dir, fname)
    if not os.path.isdir(path):
        continue
    path = os.path.join(path, "anki.po")
    problems += fix_po(path)

if problems:
5
qt/i18n/copy-ftl-files
Executable file
5
qt/i18n/copy-ftl-files
Executable file
@ -0,0 +1,5 @@
#!/bin/bash

targetDir=../aqt_data/locale/fluent
test -d $targetDir || mkdir -p $targetDir
rsync -a --delete --exclude=templates ftl/core/* $targetDir/
@ -2,7 +2,7 @@

set -e

out=../aqt_data/locale
out=../aqt_data/locale/qt
mkdir -p $out

qtTranslations=$(python -c "from PyQt5.QtCore import *; print(QLibraryInfo.location(QLibraryInfo.TranslationsPath))")
41
qt/i18n/extract-po-string.py
Normal file
41
qt/i18n/extract-po-string.py
Normal file
@ -0,0 +1,41 @@
import os
import sys

import polib

# extract a translated string from the gettext catalogs and insert it into ftl
# eg:
# $ python extract-po-string.py media-check.ftl delete-unused "Delete Unused Media" 1
ftl_filename, key, msgid, dry_run = sys.argv[1:]

print("Loading catalogs...")
base = "po/desktop"
langs = [d for d in os.listdir(base) if d != "anki.pot"]
cats = []
for lang in langs:
    po_path = os.path.join(base, lang, "anki.po")
    cat = polib.pofile(po_path)
    cats.append((lang, cat))

to_insert = []
for (lang, cat) in cats:
    for entry in cat:
        if entry.msgid == msgid:
            if entry.msgstr:
                print(lang, "has", entry.msgstr)
                to_insert.append((lang, entry.msgstr))
            break

for lang, translation in to_insert:
    dir = os.path.join("ftl", "core", lang)
    ftl_path = os.path.join(dir, ftl_filename)

    if dry_run == "1":
        continue

    if not os.path.exists(dir):
        os.mkdir(dir)

    open(ftl_path, "a").write(f"\n{key} = {translation}\n")

print("done")
16
qt/i18n/pull-git
Executable file
16
qt/i18n/pull-git
Executable file
@ -0,0 +1,16 @@
#!/bin/bash

if [ ! -d po ]; then
    git clone https://github.com/ankitects/anki-desktop-i18n po
fi

if [ ! -d ftl ]; then
    git clone https://github.com/ankitects/anki-core-i18n ftl
fi

echo "Updating translations from git..."
(cd po && git pull)
(cd ftl && git pull)

# make sure gettext translations haven't broken something
python check-po-files.py
1
qt/i18n/requirements.txt
Normal file
1
qt/i18n/requirements.txt
Normal file
@ -0,0 +1 @@
polib
12
qt/i18n/sync-git
Executable file
12
qt/i18n/sync-git
Executable file
@ -0,0 +1,12 @@
#!/bin/bash

# pull any pending changes from git repos
./pull-git

# upload changes to .pot
./update-po-template
(cd po && git add desktop; git commit -m update; git push)

# upload changes to ftl templates
./update-ftl-templates
(cd ftl && git add core; git commit -m update; git push)
Some files were not shown because too many files have changed in this diff