Merge branch 'master' of https://github.com/dae/anki
Commit: c513537c02
@@ -47,8 +47,8 @@ MODEL_CLOZE = 1
 # deck schema & syncing vars
 SCHEMA_VERSION = 11
 SYNC_ZIP_SIZE = int(2.5*1024*1024)
-SYNC_ZIP_COUNT = 100
-SYNC_URL = os.environ.get("SYNC_URL") or "https://ankiweb.net/sync/"
+SYNC_ZIP_COUNT = 25
+SYNC_BASE = "https://ankiweb.net/"
 SYNC_VER = 8
 
 HELP_SITE="http://ankisrs.net/docs/manual.html"
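Note: the two batching constants above govern how media is chunked during sync; a zip is closed off once it holds SYNC_ZIP_COUNT files or its contents exceed SYNC_ZIP_SIZE bytes. A minimal sketch of that threshold rule (hypothetical helper, not part of this diff):

```python
import os

SYNC_ZIP_SIZE = int(2.5*1024*1024)
SYNC_ZIP_COUNT = 25

def batch_full(paths):
    # a zip is sent once either limit is reached
    total = sum(os.path.getsize(p) for p in paths)
    return total >= SYNC_ZIP_SIZE or len(paths) >= SYNC_ZIP_COUNT
```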
@@ -258,7 +258,9 @@ class AnkiPackageExporter(AnkiExporter):
             media[c] = file
         # tidy up intermediate files
         os.unlink(colfile)
-        os.unlink(path.replace(".apkg", ".media.db"))
+        p = path.replace(".apkg", ".media.db2")
+        if os.path.exists(p):
+            os.unlink(p)
         os.chdir(self.mediaDir)
         shutil.rmtree(path.replace(".apkg", ".media"))
         return media
@@ -91,8 +91,8 @@ select distinct(n.id) from cards c, notes n where c.nid=n.id and """+preds
                     token += c
                 else:
                     inQuote = c
-            # separator
-            elif c == " ":
+            # separator (space and ideographic space)
+            elif c in (" ", u'\u3000'):
                 if inQuote:
                     token += c
                 elif token:
anki/media.py (326 lines changed)
@@ -3,19 +3,18 @@
 # License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
 
 import re
+import traceback
 import urllib
 import unicodedata
 import sys
 import zipfile
 from cStringIO import StringIO
 
-import send2trash
 from anki.utils import checksum, isWin, isMac, json
 from anki.db import DB
 from anki.consts import *
 from anki.latex import mungeQA
 
 
 class MediaManager(object):
 
     soundRegexps = ["(?i)(\[sound:(?P<fname>[^]]+)\])"]
@@ -54,12 +53,54 @@ class MediaManager(object):
     def connect(self):
         if self.col.server:
             return
-        path = self.dir()+".db"
+        path = self.dir()+".db2"
         create = not os.path.exists(path)
         os.chdir(self._dir)
         self.db = DB(path)
         if create:
             self._initDB()
+        self.maybeUpgrade()
+
+    def _initDB(self):
+        self.db.executescript("""
+create table media (
+ fname text not null primary key,
+ csum text,           -- null indicates deleted file
+ mtime int not null,  -- zero if deleted
+ dirty int not null
+);
+
+create index idx_media_dirty on media (dirty);
+
+create table meta (dirMod int, lastUsn int); insert into meta values (0, 0);
+""")
+
+    def maybeUpgrade(self):
+        oldpath = self.dir()+".db"
+        if os.path.exists(oldpath):
+            self.db.execute('attach "../collection.media.db" as old')
+            try:
+                self.db.execute("""
+insert into media
+ select m.fname, csum, mod, ifnull((select 1 from log l2 where l2.fname=m.fname), 0) as dirty
+ from old.media m
+ left outer join old.log l using (fname)
+union
+ select fname, null, 0, 1 from old.log where type=1;""")
+                self.db.execute("delete from meta")
+                self.db.execute("""
+insert into meta select dirMod, usn from old.meta
+""")
+                self.db.commit()
+            except Exception, e:
+                # if we couldn't import the old db for some reason, just start
+                # anew
+                self.col.log("failed to import old media db:"+traceback.format_exc())
+            self.db.execute("detach old")
+            npath = "../collection.media.db.old"
+            if os.path.exists(npath):
+                os.unlink(npath)
+            os.rename("../collection.media.db", npath)
 
     def close(self):
         if self.col.server:
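Note: the new media.db2 schema folds the old separate log table into the media table itself: csum is NULL for a file deleted locally, mtime is zero for deletions, and dirty=1 flags entries the server hasn't acknowledged yet. A standalone sketch of those conventions using plain sqlite3 (illustrative only, outside the DB wrapper used here; file names hypothetical):

```python
import sqlite3

db = sqlite3.connect(":memory:")
db.executescript("""
create table media (
 fname text not null primary key,
 csum text,           -- null indicates deleted file
 mtime int not null,  -- zero if deleted
 dirty int not null
);
create index idx_media_dirty on media (dirty);
create table meta (dirMod int, lastUsn int); insert into meta values (0, 0);
""")
# a locally added file, awaiting upload
db.execute("insert into media values (?,?,?,?)", ("a.jpg", "ab12", 1400000000, 1))
# a local deletion: checksum null, mtime zero, still dirty until the server acks it
db.execute("insert or replace into media values (?,?,?,?)", ("b.jpg", None, 0, 1))
print(db.execute("select fname from media where dirty=1").fetchall())
```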
@@ -268,75 +309,6 @@ class MediaManager(object):
     def have(self, fname):
         return os.path.exists(os.path.join(self.dir(), fname))
 
-    # Media syncing - changes and removal
-    ##########################################################################
-
-    def hasChanged(self):
-        return self.db.scalar("select 1 from log limit 1")
-
-    def removed(self):
-        return self.db.list("select * from log where type = ?", MEDIA_REM)
-
-    def syncRemove(self, fnames):
-        # remove provided deletions
-        for f in fnames:
-            if os.path.exists(f):
-                send2trash.send2trash(f)
-            self.db.execute("delete from log where fname = ?", f)
-            self.db.execute("delete from media where fname = ?", f)
-        # and all locally-logged deletions, as server has acked them
-        self.db.execute("delete from log where type = ?", MEDIA_REM)
-        self.db.commit()
-
-    # Media syncing - unbundling zip files from server
-    ##########################################################################
-
-    def syncAdd(self, zipData):
-        "Extract zip data; true if finished."
-        f = StringIO(zipData)
-        z = zipfile.ZipFile(f, "r")
-        finished = False
-        meta = None
-        media = []
-        # get meta info first
-        meta = json.loads(z.read("_meta"))
-        nextUsn = int(z.read("_usn"))
-        # then loop through all files
-        for i in z.infolist():
-            if i.filename == "_meta" or i.filename == "_usn":
-                # ignore previously-retrieved meta
-                continue
-            elif i.filename == "_finished":
-                # last zip in set
-                finished = True
-            else:
-                data = z.read(i)
-                csum = checksum(data)
-                name = meta[i.filename]
-                if not isinstance(name, unicode):
-                    name = unicode(name, "utf8")
-                # normalize name for platform
-                if isMac:
-                    name = unicodedata.normalize("NFD", name)
-                else:
-                    name = unicodedata.normalize("NFC", name)
-                # save file
-                open(name, "wb").write(data)
-                # update db
-                media.append((name, csum, self._mtime(name)))
-                # remove entries from local log
-                self.db.execute("delete from log where fname = ?", name)
-        # update media db and note new starting usn
-        if media:
-            self.db.executemany(
-                "insert or replace into media values (?,?,?)", media)
-        self.setUsn(nextUsn) # commits
-        # if we have finished adding, we need to record the new folder mtime
-        # so that we don't trigger a needless scan
-        if finished:
-            self.syncMod()
-        return finished
-
     # Illegal characters
     ##########################################################################
 
@@ -351,57 +323,16 @@ class MediaManager(object):
             return True
         return not not re.search(self._illegalCharReg, str)
 
-    # Media syncing - bundling zip files to send to server
+    # Tracking changes
     ##########################################################################
-    # Because there's no standard filename encoding for zips, and because not
-    # all zip clients support retrieving mtime, we store the files as ascii
-    # and place a json file in the zip with the necessary information.
-
-    def zipAdded(self):
-        "Add files to a zip until over SYNC_ZIP_SIZE/COUNT. Return zip data."
-        f = StringIO()
-        z = zipfile.ZipFile(f, "w", compression=zipfile.ZIP_DEFLATED)
-        sz = 0
-        cnt = 0
-        files = {}
-        cur = self.db.execute(
-            "select fname from log where type = ?", MEDIA_ADD)
-        fnames = []
-        while 1:
-            fname = cur.fetchone()
-            if not fname:
-                # add a flag so the server knows it can clean up
-                z.writestr("_finished", "")
-                break
-            fname = fname[0]
-            # we add it as a one-element array simply to make
-            # the later forgetAdded() call easier
-            fnames.append([fname])
-            z.write(fname, str(cnt))
-            files[str(cnt)] = unicodedata.normalize("NFC", fname)
-            sz += os.path.getsize(fname)
-            if sz >= SYNC_ZIP_SIZE or cnt >= SYNC_ZIP_COUNT:
-                break
-            cnt += 1
-        z.writestr("_meta", json.dumps(files))
-        z.close()
-        return f.getvalue(), fnames
-
-    def forgetAdded(self, fnames):
-        if not fnames:
-            return
-        self.db.executemany("delete from log where fname = ?", fnames)
-        self.db.commit()
-
-    # Tracking changes (private)
-    ##########################################################################
 
-    def _initDB(self):
-        self.db.executescript("""
-create table media (fname text primary key, csum text, mod int);
-create table meta (dirMod int, usn int); insert into meta values (0, 0);
-create table log (fname text primary key, type int);
-""")
+    def findChanges(self):
+        "Scan the media folder if it's changed, and note any changes."
+        if self._changed():
+            self._logChanges()
+
+    def haveDirty(self):
+        return self.db.scalar("select 1 from media where dirty=1 limit 1")
 
     def _mtime(self, path):
         return int(os.stat(path).st_mtime)
@@ -409,17 +340,6 @@ create table log (fname text primary key, type int);
     def _checksum(self, path):
         return checksum(open(path, "rb").read())
 
-    def usn(self):
-        return self.db.scalar("select usn from meta")
-
-    def setUsn(self, usn):
-        self.db.execute("update meta set usn = ?", usn)
-        self.db.commit()
-
-    def syncMod(self):
-        self.db.execute("update meta set dirMod = ?", self._mtime(self.dir()))
-        self.db.commit()
-
     def _changed(self):
         "Return dir mtime if it has changed since the last findChanges()"
         # doesn't track edits, but user can add or remove a file to update
@@ -429,38 +349,24 @@ create table log (fname text primary key, type int);
             return False
         return mtime
 
-    def findChanges(self):
-        "Scan the media folder if it's changed, and note any changes."
-        if self._changed():
-            self._logChanges()
-
     def _logChanges(self):
         (added, removed) = self._changes()
-        log = []
         media = []
-        mediaRem = []
         for f in added:
             mt = self._mtime(f)
-            media.append((f, self._checksum(f), mt))
-            log.append((f, MEDIA_ADD))
+            media.append((f, self._checksum(f), mt, 1))
         for f in removed:
-            mediaRem.append((f,))
-            log.append((f, MEDIA_REM))
+            media.append((f, None, 0, 1))
         # update media db
-        self.db.executemany("insert or replace into media values (?,?,?)",
+        self.db.executemany("insert or replace into media values (?,?,?,?)",
                             media)
-        if mediaRem:
-            self.db.executemany("delete from media where fname = ?",
-                                mediaRem)
         self.db.execute("update meta set dirMod = ?", self._mtime(self.dir()))
-        # and logs
-        self.db.executemany("insert or replace into log values (?,?)", log)
         self.db.commit()
 
     def _changes(self):
         self.cache = {}
         for (name, csum, mod) in self.db.execute(
-                "select * from media"):
+                "select fname, csum, mtime from media where csum is not null"):
             self.cache[name] = [csum, mod, False]
         added = []
         removed = []
@@ -495,34 +401,106 @@ create table log (fname text primary key, type int);
                 removed.append(k)
         return added, removed
 
-    def sanityCheck(self):
-        assert not self.db.scalar("select count() from log")
-        cnt = self.db.scalar("select count() from media")
-        return cnt
+    # Syncing-related
+    ##########################################################################
+
+    def lastUsn(self):
+        return self.db.scalar("select lastUsn from meta")
+
+    def setLastUsn(self, usn):
+        self.db.execute("update meta set lastUsn = ?", usn)
+        self.db.commit()
+
+    def syncInfo(self, fname):
+        ret = self.db.first(
+            "select csum, dirty from media where fname=?", fname)
+        return ret or (None, 0)
+
+    def markClean(self, fnames):
+        for fname in fnames:
+            self.db.execute(
+                "update media set dirty=0 where fname=?", fname)
+
+    def syncDelete(self, fname):
+        if os.path.exists(fname):
+            os.unlink(fname)
+        self.db.execute("delete from media where fname=?", fname)
+
+    def mediaCount(self):
+        return self.db.scalar(
+            "select count() from media where csum is not null")
 
     def forceResync(self):
         self.db.execute("delete from media")
-        self.db.execute("delete from log")
-        self.db.execute("update meta set usn = 0, dirMod = 0")
+        self.db.execute("vacuum analyze")
         self.db.commit()
 
-    def removeExisting(self, files):
-        "Remove files from list of files to sync, and return missing files."
-        need = []
-        remove = []
-        for f in files:
-            if isMac:
-                name = unicodedata.normalize("NFD", f)
-            else:
-                name = f
-            if self.db.scalar("select 1 from log where fname=?", name):
-                remove.append((name,))
-            else:
-                need.append(f)
-        self.db.executemany("delete from log where fname=?", remove)
-        self.db.commit()
-        # if we need all the server files, it's faster to pass None than
-        # the full list
-        if need and len(files) == len(need):
-            return None
-        return need
+    # Media syncing: zips
+    ##########################################################################
+
+    def mediaChangesZip(self):
+        f = StringIO()
+        z = zipfile.ZipFile(f, "w", compression=zipfile.ZIP_DEFLATED)
+
+        fnames = []
+        # meta is list of (fname, zipname), where zipname of None
+        # is a deleted file
+        meta = []
+        sz = 0
+
+        for c, (fname, csum) in enumerate(self.db.execute(
+                "select fname, csum from media where dirty=1"
+                " limit %d"%SYNC_ZIP_COUNT)):
+
+            fnames.append(fname)
+            normname = unicodedata.normalize("NFC", fname)
+
+            if csum:
+                self.col.log("+media zip", fname)
+                z.write(fname, str(c))
+                meta.append((normname, str(c)))
+                sz += os.path.getsize(fname)
+            else:
+                self.col.log("-media zip", fname)
+                meta.append((normname, ""))
+
+            if sz >= SYNC_ZIP_SIZE:
+                break
+
+        z.writestr("_meta", json.dumps(meta))
+        z.close()
+        return f.getvalue(), fnames
+
+    def addFilesFromZip(self, zipData):
+        "Extract zip data; true if finished."
+        f = StringIO(zipData)
+        z = zipfile.ZipFile(f, "r")
+        media = []
+        # get meta info first
+        meta = json.loads(z.read("_meta"))
+        # then loop through all files
+        cnt = 0
+        for i in z.infolist():
+            if i.filename == "_meta":
+                # ignore previously-retrieved meta
+                continue
+            else:
+                data = z.read(i)
+                csum = checksum(data)
+                name = meta[i.filename]
+                if not isinstance(name, unicode):
+                    name = unicode(name, "utf8")
+                # normalize name for platform
+                if isMac:
+                    name = unicodedata.normalize("NFD", name)
+                else:
+                    name = unicodedata.normalize("NFC", name)
+                # save file
+                open(name, "wb").write(data)
+                # update db
+                media.append((name, csum, self._mtime(name), 0))
+                cnt += 1
+        if media:
+            self.db.executemany(
+                "insert or replace into media values (?,?,?,?)", media)
+        return cnt
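Note: as the comment in mediaChangesZip() says, files travel in the zip under numeric names, and a _meta JSON entry maps them back to real filenames; an empty zipname marks a deletion. A sketch of building such a bundle (hypothetical file names and contents):

```python
import json
import zipfile
from io import BytesIO

buf = BytesIO()
z = zipfile.ZipFile(buf, "w", compression=zipfile.ZIP_DEFLATED)
meta = []
# a changed file is stored under its index, "0"
z.writestr("0", b"...image bytes...")
meta.append(("cat.jpg", "0"))
# a deleted file appears only in meta, with an empty zipname
meta.append(("old.mp3", ""))
z.writestr("_meta", json.dumps(meta))
z.close()
zipData = buf.getvalue()
```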
anki/sync.py (267 lines changed)
@@ -15,7 +15,6 @@ from anki.consts import *
 from hooks import runHook
 import anki
 
-
 # syncing vars
 HTTP_TIMEOUT = 90
 HTTP_PROXY = None
@@ -539,6 +538,7 @@ class HttpSyncer(object):
         self.hkey = hkey
         self.skey = checksum(str(random.random()))[:8]
         self.con = con or httpCon()
+        self.postVars = {}
 
     def assertOk(self, resp):
         if resp['status'] != '200':
@@ -550,18 +550,13 @@ class HttpSyncer(object):
     # costly. We could send it as a raw post, but more HTTP clients seem to
     # support file uploading, so this is the more compatible choice.
 
-    def req(self, method, fobj=None, comp=6,
-            badAuthRaises=True, hkey=True):
+    def req(self, method, fobj=None, comp=6, badAuthRaises=False):
         BOUNDARY="Anki-sync-boundary"
         bdry = "--"+BOUNDARY
         buf = StringIO()
-        # compression flag and session key as post vars
-        vars = {}
-        vars['c'] = 1 if comp else 0
-        if hkey:
-            vars['k'] = self.hkey
-            vars['s'] = self.skey
-        for (key, value) in vars.items():
+        # post vars
+        self.postVars['c'] = 1 if comp else 0
+        for (key, value) in self.postVars.items():
             buf.write(bdry + "\r\n")
             buf.write(
                 'Content-Disposition: form-data; name="%s"\r\n\r\n%s\r\n' %
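Note: req() now serializes whatever the caller left in self.postVars as ordinary multipart/form-data fields, with the payload appended afterwards as a file part. A rough standalone sketch of just the form-field encoding step (assumed field values; the real method also appends the file part and a closing boundary):

```python
from io import BytesIO

BOUNDARY = "Anki-sync-boundary"

def encode_fields(post_vars):
    # each var becomes one multipart/form-data field
    bdry = "--" + BOUNDARY
    buf = BytesIO()
    for key, value in post_vars.items():
        buf.write((bdry + "\r\n").encode("utf8"))
        buf.write((
            'Content-Disposition: form-data; name="%s"\r\n\r\n%s\r\n'
            % (key, value)).encode("utf8"))
    return buf.getvalue()

print(encode_fields({"c": 1, "k": "someHostKey"}))
```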
@@ -595,7 +590,7 @@ Content-Type: application/octet-stream\r\n\r\n""")
         body = buf.getvalue()
         buf.close()
         resp, cont = self.con.request(
-            SYNC_URL+method, "POST", headers=headers, body=body)
+            self.syncURL()+method, "POST", headers=headers, body=body)
         if not badAuthRaises:
             # return false if bad auth instead of raising
             if resp['status'] == '403':
@@ -611,11 +606,17 @@ class RemoteServer(HttpSyncer):
     def __init__(self, hkey):
         HttpSyncer.__init__(self, hkey)
 
+    def syncURL(self):
+        if os.getenv("DEV"):
+            return "http://localhost:5000/sync/"
+        return SYNC_BASE + "sync/"
+
     def hostKey(self, user, pw):
         "Returns hkey or none if user/pw incorrect."
+        self.postVars = dict()
         ret = self.req(
             "hostKey", StringIO(json.dumps(dict(u=user, p=pw))),
-            badAuthRaises=False, hkey=False)
+            badAuthRaises=False)
         if not ret:
             # invalid auth
             return
@@ -623,6 +624,10 @@ class RemoteServer(HttpSyncer):
         return self.hkey
 
     def meta(self):
+        self.postVars = dict(
+            k=self.hkey,
+            s=self.skey,
+        )
         ret = self.req(
             "meta", StringIO(json.dumps(dict(
                 v=SYNC_VER, cv="ankidesktop,%s,%s"%(anki.version, platDesc())))),
@@ -661,8 +666,15 @@ class FullSyncer(HttpSyncer):
 
     def __init__(self, col, hkey, con):
         HttpSyncer.__init__(self, hkey, con)
+        self.postVars = dict(
+            k=self.hkey,
+            v="ankidesktop,%s,%s"%(anki.version, platDesc()),
+        )
         self.col = col
 
+    def syncURL(self):
+        return SYNC_BASE + "sync/"
+
     def download(self):
         runHook("sync", "download")
         self.col.close()
@@ -697,117 +709,192 @@ class FullSyncer(HttpSyncer):
 
 # Media syncing
 ##########################################################################
+#
+# About conflicts:
+# - to minimize data loss, if both sides are marked for sending and one
+#   side has been deleted, favour the add
+# - if added/changed on both sides, favour the server version on the
+#   assumption other syncers are in sync with the server
+#
 
 class MediaSyncer(object):
 
     def __init__(self, col, server=None):
         self.col = col
         self.server = server
-        self.added = None
 
-    def sync(self, mediaUsn):
-        # step 1: check if there have been any changes
+    def sync(self):
+        # check if there have been any changes
         runHook("sync", "findMedia")
-        lusn = self.col.media.usn()
-        # if first sync or resync, clear list of files we think we've sent
-        if not lusn:
-            self.col.media.forceResync()
+        self.col.log("findChanges")
         self.col.media.findChanges()
-        if lusn == mediaUsn and not self.col.media.hasChanged():
+
+        # begin session and check if in sync
+        lastUsn = self.col.media.lastUsn()
+        ret = self.server.begin()
+        srvUsn = ret['usn']
+        if lastUsn == srvUsn and not self.col.media.haveDirty():
             return "noChanges"
-        # step 1.5: if resyncing, we need to get the list of files the server
-        # has and remove them from our local list of files to sync
-        if not lusn:
-            files = self.server.mediaList()
-            need = self.col.media.removeExisting(files)
-        else:
-            need = None
-        # step 2: send/recv deletions
-        runHook("sync", "removeMedia")
-        lrem = self.removed()
-        rrem = self.server.remove(fnames=lrem, minUsn=lusn)
-        self.remove(rrem)
-        # step 3: stream files from server
-        runHook("sync", "server")
-        while 1:
-            runHook("sync", "streamMedia")
-            usn = self.col.media.usn()
-            zip = self.server.files(minUsn=usn, need=need)
-            if self.addFiles(zip=zip):
+
+        # loop through and process changes from server
+        self.col.log("last local usn is %s"%lastUsn)
+        while True:
+            data = self.server.mediaChanges(lastUsn=lastUsn)
+            self.col.log("mediaChanges resp count %d"%len(data))
+            if not data:
                 break
-        # step 4: stream files to the server
-        runHook("sync", "client")
-        while 1:
-            runHook("sync", "streamMedia")
-            zip, fnames = self.files()
+
+            need = []
+            lastUsn = data[-1][1]
+            for fname, rusn, rsum in data:
+                lsum, ldirty = self.col.media.syncInfo(fname)
+                self.col.log(
+                    "check: lsum=%s rsum=%s ldirty=%d rusn=%d fname=%s"%(
+                        (lsum and lsum[0:4]),
+                        (rsum and rsum[0:4]),
+                        ldirty,
+                        rusn,
+                        fname))
+
+                if rsum:
+                    # added/changed remotely
+                    if not lsum or lsum != rsum:
+                        self.col.log("will fetch")
+                        need.append(fname)
+                    else:
+                        self.col.log("have same already")
+                        ldirty and self.col.media.markClean([fname])
+                elif lsum:
+                    # deleted remotely
+                    if not ldirty:
+                        self.col.log("delete local")
+                        self.col.media.syncDelete(fname)
+                    else:
+                        # conflict; local add overrides remote delete
+                        self.col.log("conflict; will send")
+                else:
+                    # deleted both sides
+                    self.col.log("both sides deleted")
+                    ldirty and self.col.media.markClean([fname])
+
+            self._downloadFiles(need)
+
+            self.col.log("update last usn to %d"%lastUsn)
+            self.col.media.setLastUsn(lastUsn) # commits
+
+        # at this point we're all up to date with the server's changes,
+        # and we need to send our own
+
+        updateConflict = False
+        while True:
+            zip, fnames = self.col.media.mediaChangesZip()
             if not fnames:
-                # finished
                 break
-            usn = self.server.addFiles(zip=zip)
-            # after server has replied, safe to remove from log
-            self.col.media.forgetAdded(fnames)
-            self.col.media.setUsn(usn)
-        # step 5: sanity check during beta testing
-        # NOTE: when removing this, need to move server tidyup
-        # back from sanity check to addFiles
-        c = self.mediaSanity()
-        s = self.server.mediaSanity(client=c)
-        self.col.log("mediaSanity", c, s)
-        if c != s:
-            # if the sanity check failed, force a resync
+
+            processedCnt, serverLastUsn = self.server.uploadChanges(zip)
+            self.col.media.markClean(fnames[0:processedCnt])
+
+            self.col.log("processed %d, serverUsn %d, clientUsn %d" % (
+                processedCnt, serverLastUsn, lastUsn
+            ))
+
+            if serverLastUsn - processedCnt == lastUsn:
+                self.col.log("lastUsn in sync, updating local")
+                self.col.media.setLastUsn(serverLastUsn) # commits
+            else:
+                self.col.log("concurrent update, skipping usn update")
+                # commit for markClean
+                self.col.media.db.commit()
+                updateConflict = True
+
+        if updateConflict:
+            self.col.log("restart sync due to concurrent update")
+            return self.sync()
+
+        lcnt = self.col.media.mediaCount()
+        ret = self.server.mediaSanity(local=lcnt)
+        if ret == "OK":
+            return "OK"
+        else:
             self.col.media.forceResync()
-            return "sanityCheckFailed"
-        return "success"
+            return ret
 
-    def removed(self):
-        return self.col.media.removed()
+    def _downloadFiles(self, fnames):
+        self.col.log("%d files to fetch"%len(fnames))
+        while fnames:
+            top = fnames[0:SYNC_ZIP_COUNT]
+            self.col.log("fetch %s"%top)
+            zipData = self.server.downloadFiles(files=top)
+            cnt = self.col.media.addFilesFromZip(zipData)
+            self.col.log("received %d files"%cnt)
+            fnames = fnames[cnt:]
 
-    def remove(self, fnames, minUsn=None):
-        self.col.media.syncRemove(fnames)
-        if minUsn is not None:
-            # we're the server
-            return self.col.media.removed()
-
     def files(self):
-        return self.col.media.zipAdded()
+        return self.col.media.addFilesToZip()
 
     def addFiles(self, zip):
         "True if zip is the last in set. Server returns new usn instead."
-        return self.col.media.syncAdd(zip)
+        return self.col.media.addFilesFromZip(zip)
 
-    def mediaSanity(self):
-        return self.col.media.sanityCheck()
-
 # Remote media syncing
 ##########################################################################
 
 class RemoteMediaServer(HttpSyncer):
 
-    def __init__(self, hkey, con):
+    def __init__(self, col, hkey, con):
+        self.col = col
         HttpSyncer.__init__(self, hkey, con)
 
-    def remove(self, **kw):
-        return json.loads(
-            self.req("remove", StringIO(json.dumps(kw))))
+    def syncURL(self):
+        if os.getenv("DEV"):
+            return "http://localhost:5001/"
+        return SYNC_BASE + "msync/"
 
-    def files(self, **kw):
-        return self.req("files", StringIO(json.dumps(kw)))
+    def begin(self):
+        self.postVars = dict(
+            k=self.hkey,
+            v="ankidesktop,%s,%s"%(anki.version, platDesc())
+        )
+        ret = self._dataOnly(json.loads(self.req(
+            "begin", StringIO(json.dumps(dict())))))
+        self.skey = ret['sk']
+        return ret
 
-    def addFiles(self, zip):
+    # args: lastUsn
+    def mediaChanges(self, **kw):
+        self.postVars = dict(
+            sk=self.skey,
+        )
+        resp = json.loads(
+            self.req("mediaChanges", StringIO(json.dumps(kw))))
+        return self._dataOnly(resp)
+
+    # args: files
+    def downloadFiles(self, **kw):
+        return self.req("downloadFiles", StringIO(json.dumps(kw)))
+
+    def uploadChanges(self, zip):
         # no compression, as we compress the zip file instead
-        return json.loads(
-            self.req("addFiles", StringIO(zip), comp=0))
+        return self._dataOnly(json.loads(
+            self.req("uploadChanges", StringIO(zip), comp=0)))
 
+    # args: local
     def mediaSanity(self, **kw):
-        return json.loads(
-            self.req("mediaSanity", StringIO(json.dumps(kw))))
+        return self._dataOnly(json.loads(
+            self.req("mediaSanity", StringIO(json.dumps(kw)))))
 
-    def mediaList(self):
-        return json.loads(
-            self.req("mediaList"))
+    def _dataOnly(self, resp):
+        if resp['err']:
+            self.col.log("error returned:%s"%resp['err'])
+            raise Exception("SyncError:%s"%resp['err'])
+        return resp['data']
 
     # only for unit tests
-    def mediatest(self, n):
-        return json.loads(
-            self.req("mediatest", StringIO(
-                json.dumps(dict(n=n)))))
+    def mediatest(self, cmd):
+        self.postVars = dict(
+            k=self.hkey,
+        )
+        return self._dataOnly(json.loads(
+            self.req("newMediaTest", StringIO(
+                json.dumps(dict(cmd=cmd))))))
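Note: the conflict policy described in the "About conflicts" comment reduces to a small decision on (local checksum, remote checksum, local dirty flag), mirroring the branches inside sync(). A standalone sketch (function name and labels hypothetical):

```python
def resolve(lsum, rsum, ldirty):
    """Return the action taken for one server-reported change."""
    if rsum:
        # added/changed remotely; server wins unless we already match
        return "fetch" if lsum != rsum else "mark clean"
    elif lsum:
        # deleted remotely; a dirty local copy wins (favour the add)
        return "will send" if ldirty else "delete local"
    else:
        # deleted on both sides; nothing left to transfer
        return "mark clean"

assert resolve(None, "ab12", 0) == "fetch"
assert resolve("ab12", None, 1) == "will send"
assert resolve(None, None, 1) == "mark clean"
```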
@@ -189,6 +189,14 @@ def parseArgs(argv):
     return parser.parse_args(argv[1:])
 
 def run():
+    try:
+        _run()
+    except Exception, e:
+        QMessageBox.critical(None, "Startup Error",
+                             "Please notify support of this error:\n\n"+
+                             traceback.format_exc())
+
+def _run():
     global mw
 
     # parse args
@@ -245,7 +253,4 @@ environment points to a valid, writable folder.""")
     # load the main window
     import aqt.main
     mw = aqt.main.AnkiQt(app, pm, args)
     app.exec_()
-
-if __name__ == "__main__":
-    run()
@@ -32,7 +32,7 @@ Alex Fraser, Andreas Klauer, Andrew Wright, Bernhard Ibertsberger, Charlene Bari
 Christian Krause, Christian Rusche, David Smith, Dave Druelinger, Dotan Cohen,
 Emilio Wuerges, Emmanuel Jarri, Frank Harper, Gregor Skumavc, H. Mijail,
 Houssam Salem, Ian Lewis, Immanuel Asmus, Iroiro, Jarvik7,
-Jin Eun-Deok, Jo Nakashima, Johanna Lindh, Julien Baley, Kieran Clancy, LaC, Laurent Steffan,
+Jin Eun-Deok, Jo Nakashima, Johanna Lindh, Julien Baley, Jussi Määttä, Kieran Clancy, LaC, Laurent Steffan,
 Luca Ban, Luciano Esposito, Marco Giancotti, Marcus Rubeus, Mari Egami, Michael Jürges, Mark Wilbur,
 Matthew Duggan, Matthew Holtz, Meelis Vasser, Michael Keppler, Michael
 Montague, Michael Penkov, Michal Čadil, Morteza Salehi, Nathanael Law, Nick Cook, Niklas
@@ -1779,6 +1779,7 @@ class FavouritesLineEdit(QLineEdit):
         frameWidth = self.style().pixelMetric(QStyle.PM_DefaultFrameWidth)
         self.button.move(self.rect().right() - frameWidth - buttonSize.width(),
                          (self.rect().bottom() - buttonSize.height() + 1) / 2)
+        self.setTextMargins(0, 0, buttonSize.width() * 1.5, 0)
         super(FavouritesLineEdit, self).resizeEvent(event)
 
     def setIcon(self, path):
@@ -317,7 +317,7 @@ backup, please see the 'Backups' section of the user manual."""))
     try:
         z.getinfo("collection.anki2")
     except:
-        showWarning(_("The provided file is not a valid .apkg file."))
+        showWarning(invalidZipMsg())
         return
     # we need to ask whether to import/replace
     if not setupApkgImport(mw, importer):
@@ -326,12 +326,7 @@ backup, please see the 'Backups' section of the user manual."""))
     try:
         importer.run()
     except zipfile.BadZipfile:
-        msg = _("""\
-This file does not appear to be a valid .apkg file. If you're getting this \
-error from a file downloaded from AnkiWeb, chances are that your download \
-failed. Please try again, and if the problem persists, please try again \
-with a different browser.""")
-        showWarning(msg)
+        showWarning(invalidZipMsg())
     except Exception, e:
         err = repr(str(e))
         if "invalidFile" in err:
@@ -357,6 +352,13 @@ Unable to import from a read-only file."""))
         mw.progress.finish()
     mw.reset()
 
+def invalidZipMsg():
+    return _("""\
+This file does not appear to be a valid .apkg file. If you're getting this \
+error from a file downloaded from AnkiWeb, chances are that your download \
+failed. Please try again, and if the problem persists, please try again \
+with a different browser.""")
+
 def setupApkgImport(mw, importer):
     base = os.path.basename(importer.file).lower()
     full = (base == "collection.apkg") or re.match("backup-.*\\.apkg", base)
aqt/main.py (30 lines changed)
@@ -271,14 +271,13 @@ To import into a password protected profile, please open the profile before atte
             self.col = Collection(cpath, log=True)
         except anki.db.Error:
             # warn user
-            showWarning("""\
-Your collection is corrupt. Please see the manual for \
-how to restore from a backup.""")
-            # move it out of the way so the profile can be used again
-            newpath = cpath+str(intTime())
-            os.rename(cpath, newpath)
-            # then close
-            sys.exit(1)
+            showWarning(_("""\
+Your collection is corrupt. Please create a new profile, then \
+see the manual for how to restore from an automatic backup.
+
+Debug info:
+""")+traceback.format_exc())
+            self.unloadProfile()
         except Exception, e:
             # the custom exception handler won't catch this if we immediately
             # unload, so we have to manually handle it
@@ -304,12 +303,17 @@ how to restore from a backup.""")
         if self.col:
             if not self.closeAllCollectionWindows():
                 return
-            self.maybeOptimize()
             self.progress.start(immediate=True)
-            if os.getenv("ANKIDEV", 0):
-                corrupt = False
-            else:
-                corrupt = self.col.db.scalar("pragma integrity_check") != "ok"
+            corrupt = False
+            try:
+                self.maybeOptimize()
+            except:
+                corrupt = True
+            if not corrupt:
+                if os.getenv("ANKIDEV", 0):
+                    corrupt = False
+                else:
+                    corrupt = self.col.db.scalar("pragma integrity_check") != "ok"
             if corrupt:
                 showWarning(_("Your collection file appears to be corrupt. \
 This can happen when the file is copied or moved while Anki is open, or \
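Note: the reworked unload path now treats a failure inside maybeOptimize() as a corruption signal too, then falls back to SQLite's built-in check: "pragma integrity_check" returns a single row "ok" for a healthy database and diagnostic rows otherwise. A minimal standalone illustration with the sqlite3 module:

```python
import sqlite3

db = sqlite3.connect(":memory:")
result = db.execute("pragma integrity_check").fetchone()[0]
corrupt = result != "ok"
print(corrupt)  # False for a healthy database
```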
@@ -98,6 +98,7 @@ Not currently enabled; click the sync button in the main window to enable."""))
 
     def onSyncDeauth(self):
         self.prof['syncKey'] = None
+        self.mw.col.media.forceResync()
         self._hideAuth()
 
     def updateNetwork(self):
@@ -231,7 +231,15 @@ and no other programs are accessing your profile folders, then try again."""))
         new = not os.path.exists(path)
         def recover():
             # if we can't load profile, start with a new one
-            os.rename(path, path+".broken")
+            if self.db:
+                try:
+                    self.db.close()
+                except:
+                    pass
+            broken = path+".broken"
+            if os.path.exists(broken):
+                os.unlink(broken)
+            os.rename(path, broken)
             QMessageBox.warning(
                 None, "Preferences Corrupt", """\
 Anki's prefs.db file was corrupt and has been recreated. If you were using multiple \
@@ -404,9 +404,9 @@ class SyncThread(QThread):
     def _syncMedia(self):
         if not self.media:
             return
-        self.server = RemoteMediaServer(self.hkey, self.server.con)
+        self.server = RemoteMediaServer(self.col, self.hkey, self.server.con)
         self.client = MediaSyncer(self.col, self.server)
-        ret = self.client.sync(self.mediaUsn)
+        ret = self.client.sync()
         if ret == "noChanges":
             self.fireEvent("noMediaChanges")
         elif ret == "sanityCheckFailed":