Merge branch 'master' into search-errors
Add error support for new resched node, update rated error.
commit cb4a103bb1
.gitignore (vendored, +1 line):

@@ -2,4 +2,5 @@ __pycache__
 .DS_Store
 /bazel-*
 anki.prof
 target
+user.bazelrc
Cargo.lock (generated, 38 lines changed):

@@ -2,9 +2,9 @@
 # It is not intended for manual editing.
 [[package]]
 name = "addr2line"
-version = "0.14.0"
+version = "0.14.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7c0929d69e78dd9bf5408269919fcbcaeb2e35e5d43e5815517cdc6a8e11a423"
+checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7"
 dependencies = [
  "gimli",
 ]
@@ -36,6 +36,7 @@ version = "0.0.0"
 dependencies = [
  "askama",
  "async-compression",
+ "async-trait",
  "blake3",
  "bytes 0.5.6",
  "chrono",
@@ -180,6 +181,17 @@ dependencies = [
  "pin-project-lite 0.2.0",
 ]

+[[package]]
+name = "async-trait"
+version = "0.1.42"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8d3a45e77e34375a7923b1e8febb049bb011f064714a8e17a1a616fef01da13d"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
 [[package]]
 name = "atty"
 version = "0.2.14"
@@ -429,9 +441,9 @@ dependencies = [

 [[package]]
 name = "derivative"
-version = "2.1.1"
+version = "2.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cb582b60359da160a9477ee80f15c8d784c477e69c217ef2cdd4169c24ea380f"
+checksum = "eaed5874effa6cde088c644ddcdcb4ffd1511391c5be4fdd7a5ccd02c7e4a183"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -590,9 +602,9 @@ dependencies = [

 [[package]]
 name = "fluent-syntax"
-version = "0.10.0"
+version = "0.10.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9389354f858e38f37d9a249133611a1fcaec469f44773b04ddbd82f4f08d49eb"
+checksum = "edb1016e8c600060e0099218442fff329a204f6316d6ec974d590d3281517a52"

 [[package]]
 name = "fnv"
@@ -763,11 +775,11 @@ dependencies = [

 [[package]]
 name = "getrandom"
-version = "0.1.15"
+version = "0.1.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6"
+checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce"
 dependencies = [
- "cfg-if 0.1.10",
+ "cfg-if 1.0.0",
  "libc",
  "wasi 0.9.0+wasi-snapshot-preview1",
 ]
@@ -2082,9 +2094,9 @@ dependencies = [

 [[package]]
 name = "smallvec"
-version = "1.5.1"
+version = "1.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ae524f056d7d770e174287294f562e95044c68e88dec909a00d2094805db9d75"
+checksum = "1a55ca5f3b68e41c979bf8c46a6f1da892ca4db8f94023ce0bd32407573b1ac0"

 [[package]]
 name = "socket2"
@@ -2123,9 +2135,9 @@ checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2"

 [[package]]
 name = "syn"
-version = "1.0.56"
+version = "1.0.57"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a9802ddde94170d186eeee5005b798d9c159fa970403f1be19976d0cfb939b72"
+checksum = "4211ce9909eb971f111059df92c45640aad50a619cf55cd76476be803c4c68e6"
 dependencies = [
  "proc-macro2",
  "quote",
cargo/crates.bzl:

@@ -13,12 +13,12 @@ def raze_fetch_remote_crates():
    """This function defines a collection of repos and should be called in a WORKSPACE file"""
    maybe(
        http_archive,
-        name = "raze__addr2line__0_14_0",
-        url = "https://crates.io/api/v1/crates/addr2line/0.14.0/download",
+        name = "raze__addr2line__0_14_1",
+        url = "https://crates.io/api/v1/crates/addr2line/0.14.1/download",
        type = "tar.gz",
-        sha256 = "7c0929d69e78dd9bf5408269919fcbcaeb2e35e5d43e5815517cdc6a8e11a423",
-        strip_prefix = "addr2line-0.14.0",
-        build_file = Label("//cargo/remote:BUILD.addr2line-0.14.0.bazel"),
+        sha256 = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7",
+        strip_prefix = "addr2line-0.14.1",
+        build_file = Label("//cargo/remote:BUILD.addr2line-0.14.1.bazel"),
    )

    maybe(
@@ -151,6 +151,16 @@ def raze_fetch_remote_crates():
        build_file = Label("//cargo/remote:BUILD.async-compression-0.3.7.bazel"),
    )

+    maybe(
+        http_archive,
+        name = "raze__async_trait__0_1_42",
+        url = "https://crates.io/api/v1/crates/async-trait/0.1.42/download",
+        type = "tar.gz",
+        sha256 = "8d3a45e77e34375a7923b1e8febb049bb011f064714a8e17a1a616fef01da13d",
+        strip_prefix = "async-trait-0.1.42",
+        build_file = Label("//cargo/remote:BUILD.async-trait-0.1.42.bazel"),
+    )
+
    maybe(
        http_archive,
        name = "raze__atty__0_2_14",
@@ -433,12 +443,12 @@ def raze_fetch_remote_crates():

    maybe(
        http_archive,
-        name = "raze__derivative__2_1_1",
-        url = "https://crates.io/api/v1/crates/derivative/2.1.1/download",
+        name = "raze__derivative__2_1_3",
+        url = "https://crates.io/api/v1/crates/derivative/2.1.3/download",
        type = "tar.gz",
-        sha256 = "cb582b60359da160a9477ee80f15c8d784c477e69c217ef2cdd4169c24ea380f",
-        strip_prefix = "derivative-2.1.1",
-        build_file = Label("//cargo/remote:BUILD.derivative-2.1.1.bazel"),
+        sha256 = "eaed5874effa6cde088c644ddcdcb4ffd1511391c5be4fdd7a5ccd02c7e4a183",
+        strip_prefix = "derivative-2.1.3",
+        build_file = Label("//cargo/remote:BUILD.derivative-2.1.3.bazel"),
    )

    maybe(
@@ -603,12 +613,12 @@ def raze_fetch_remote_crates():

    maybe(
        http_archive,
-        name = "raze__fluent_syntax__0_10_0",
-        url = "https://crates.io/api/v1/crates/fluent-syntax/0.10.0/download",
+        name = "raze__fluent_syntax__0_10_1",
+        url = "https://crates.io/api/v1/crates/fluent-syntax/0.10.1/download",
        type = "tar.gz",
-        sha256 = "9389354f858e38f37d9a249133611a1fcaec469f44773b04ddbd82f4f08d49eb",
-        strip_prefix = "fluent-syntax-0.10.0",
-        build_file = Label("//cargo/remote:BUILD.fluent-syntax-0.10.0.bazel"),
+        sha256 = "edb1016e8c600060e0099218442fff329a204f6316d6ec974d590d3281517a52",
+        strip_prefix = "fluent-syntax-0.10.1",
+        build_file = Label("//cargo/remote:BUILD.fluent-syntax-0.10.1.bazel"),
    )

    maybe(
@@ -793,12 +803,12 @@ def raze_fetch_remote_crates():

    maybe(
        http_archive,
-        name = "raze__getrandom__0_1_15",
-        url = "https://crates.io/api/v1/crates/getrandom/0.1.15/download",
+        name = "raze__getrandom__0_1_16",
+        url = "https://crates.io/api/v1/crates/getrandom/0.1.16/download",
        type = "tar.gz",
-        sha256 = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6",
-        strip_prefix = "getrandom-0.1.15",
-        build_file = Label("//cargo/remote:BUILD.getrandom-0.1.15.bazel"),
+        sha256 = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce",
+        strip_prefix = "getrandom-0.1.16",
+        build_file = Label("//cargo/remote:BUILD.getrandom-0.1.16.bazel"),
    )

    maybe(
@@ -2103,12 +2113,12 @@ def raze_fetch_remote_crates():

    maybe(
        http_archive,
-        name = "raze__smallvec__1_5_1",
-        url = "https://crates.io/api/v1/crates/smallvec/1.5.1/download",
+        name = "raze__smallvec__1_6_0",
+        url = "https://crates.io/api/v1/crates/smallvec/1.6.0/download",
        type = "tar.gz",
-        sha256 = "ae524f056d7d770e174287294f562e95044c68e88dec909a00d2094805db9d75",
-        strip_prefix = "smallvec-1.5.1",
-        build_file = Label("//cargo/remote:BUILD.smallvec-1.5.1.bazel"),
+        sha256 = "1a55ca5f3b68e41c979bf8c46a6f1da892ca4db8f94023ce0bd32407573b1ac0",
+        strip_prefix = "smallvec-1.6.0",
+        build_file = Label("//cargo/remote:BUILD.smallvec-1.6.0.bazel"),
    )

    maybe(
@@ -2163,12 +2173,12 @@ def raze_fetch_remote_crates():

    maybe(
        http_archive,
-        name = "raze__syn__1_0_56",
-        url = "https://crates.io/api/v1/crates/syn/1.0.56/download",
+        name = "raze__syn__1_0_57",
+        url = "https://crates.io/api/v1/crates/syn/1.0.57/download",
        type = "tar.gz",
-        sha256 = "a9802ddde94170d186eeee5005b798d9c159fa970403f1be19976d0cfb939b72",
-        strip_prefix = "syn-1.0.56",
-        build_file = Label("//cargo/remote:BUILD.syn-1.0.56.bazel"),
+        sha256 = "4211ce9909eb971f111059df92c45640aad50a619cf55cd76476be803c4c68e6",
+        strip_prefix = "syn-1.0.57",
+        build_file = Label("//cargo/remote:BUILD.syn-1.0.57.bazel"),
    )

    maybe(
cargo/licenses.json:

@@ -1,7 +1,7 @@
 [
     {
         "name": "addr2line",
-        "version": "0.14.0",
+        "version": "0.14.1",
         "authors": "Nick Fitzgerald <fitzgen@gmail.com>|Philip Craig <philipjcraig@gmail.com>|Jon Gjengset <jon@thesquareplanet.com>|Noah Bergbauer <noah.bergbauer@tum.de>",
         "repository": "https://github.com/gimli-rs/addr2line",
         "license": "Apache-2.0 OR MIT",
@@ -143,6 +143,15 @@
         "license_file": null,
         "description": "Adaptors between compression crates and Rust's modern asynchronous IO types."
     },
+    {
+        "name": "async-trait",
+        "version": "0.1.42",
+        "authors": "David Tolnay <dtolnay@gmail.com>",
+        "repository": "https://github.com/dtolnay/async-trait",
+        "license": "Apache-2.0 OR MIT",
+        "license_file": null,
+        "description": "Type erasure for async trait methods"
+    },
     {
         "name": "atty",
         "version": "0.2.14",
@@ -397,7 +406,7 @@
     },
     {
         "name": "derivative",
-        "version": "2.1.1",
+        "version": "2.1.3",
         "authors": "mcarton <cartonmartin+git@gmail.com>",
         "repository": "https://github.com/mcarton/rust-derivative",
         "license": "Apache-2.0 OR MIT",
@@ -550,7 +559,7 @@
     },
     {
         "name": "fluent-syntax",
-        "version": "0.10.0",
+        "version": "0.10.1",
         "authors": "Zibi Braniecki <gandalf@mozilla.com>|Staś Małolepszy <stas@mozilla.com>",
         "repository": "https://github.com/projectfluent/fluent-rs",
         "license": "Apache-2.0 OR MIT",
@@ -721,7 +730,7 @@
     },
     {
         "name": "getrandom",
-        "version": "0.1.15",
+        "version": "0.1.16",
         "authors": "The Rand Project Developers",
         "repository": "https://github.com/rust-random/getrandom",
         "license": "Apache-2.0 OR MIT",
@@ -1909,7 +1918,7 @@
     },
     {
         "name": "smallvec",
-        "version": "1.5.1",
+        "version": "1.6.0",
         "authors": "The Servo Project Developers",
         "repository": "https://github.com/servo/rust-smallvec",
         "license": "Apache-2.0 OR MIT",
@@ -1963,7 +1972,7 @@
     },
     {
         "name": "syn",
-        "version": "1.0.56",
+        "version": "1.0.57",
         "authors": "David Tolnay <dtolnay@gmail.com>",
         "repository": "https://github.com/dtolnay/syn",
         "license": "Apache-2.0 OR MIT",
cargo/remote/BUILD.addr2line-0.14.1.bazel:

@@ -48,7 +48,7 @@ rust_library(
        "cargo-raze",
        "manual",
    ],
-    version = "0.14.0",
+    version = "0.14.1",
    # buildifier: leave-alone
    deps = [
        "@raze__gimli__0_23_0//:gimli",

@@ -51,6 +51,6 @@ rust_library(
    deps = [
        "@raze__askama_shared__0_11_1//:askama_shared",
        "@raze__proc_macro2__1_0_24//:proc_macro2",
-        "@raze__syn__1_0_56//:syn",
+        "@raze__syn__1_0_57//:syn",
    ],
)

@@ -63,7 +63,7 @@ rust_library(
        "@raze__proc_macro2__1_0_24//:proc_macro2",
        "@raze__quote__1_0_8//:quote",
        "@raze__serde__1_0_118//:serde",
-        "@raze__syn__1_0_56//:syn",
+        "@raze__syn__1_0_57//:syn",
        "@raze__toml__0_5_8//:toml",
    ],
)
cargo/remote/BUILD.async-trait-0.1.42.bazel (new file, +60 lines):

"""
@generated
cargo-raze crate build file.

DO NOT EDIT! Replaced on runs of cargo-raze
"""

# buildifier: disable=load
load(
    "@io_bazel_rules_rust//rust:rust.bzl",
    "rust_binary",
    "rust_library",
    "rust_test",
)

# buildifier: disable=load
load("@bazel_skylib//lib:selects.bzl", "selects")

package(default_visibility = [
    # Public for visibility by "@raze__crate__version//" targets.
    #
    # Prefer access through "//cargo", which limits external
    # visibility to explicit Cargo.toml dependencies.
    "//visibility:public",
])

licenses([
    "notice",  # MIT from expression "MIT OR Apache-2.0"
])

# Generated Targets

rust_library(
    name = "async_trait",
    srcs = glob(["**/*.rs"]),
    crate_features = [
    ],
    crate_root = "src/lib.rs",
    crate_type = "proc-macro",
    data = [],
    edition = "2018",
    rustc_flags = [
        "--cap-lints=allow",
    ],
    tags = [
        "cargo-raze",
        "manual",
    ],
    version = "0.1.42",
    # buildifier: leave-alone
    deps = [
        "@raze__proc_macro2__1_0_24//:proc_macro2",
        "@raze__quote__1_0_8//:quote",
        "@raze__syn__1_0_57//:syn",
    ],
)

# Unsupported target "compiletest" with type "test" omitted

# Unsupported target "test" with type "test" omitted
@@ -63,7 +63,7 @@ rust_library(
    version = "0.3.55",
    # buildifier: leave-alone
    deps = [
-        "@raze__addr2line__0_14_0//:addr2line",
+        "@raze__addr2line__0_14_1//:addr2line",
        "@raze__cfg_if__1_0_0//:cfg_if",
        "@raze__libc__0_2_81//:libc",
        "@raze__miniz_oxide__0_4_3//:miniz_oxide",

@@ -52,6 +52,6 @@ rust_library(
    # buildifier: leave-alone
    deps = [
        "@raze__quote__1_0_8//:quote",
-        "@raze__syn__1_0_56//:syn",
+        "@raze__syn__1_0_57//:syn",
    ],
)

cargo/remote/BUILD.derivative-2.1.3.bazel:

@@ -47,15 +47,17 @@ rust_library(
        "cargo-raze",
        "manual",
    ],
-    version = "2.1.1",
+    version = "2.1.3",
    # buildifier: leave-alone
    deps = [
        "@raze__proc_macro2__1_0_24//:proc_macro2",
        "@raze__quote__1_0_8//:quote",
-        "@raze__syn__1_0_56//:syn",
+        "@raze__syn__1_0_57//:syn",
    ],
)

+# Unsupported target "clippy-warning-clone-from" with type "test" omitted
+
# Unsupported target "compile-test" with type "test" omitted

# Unsupported target "derive-clone" with type "test" omitted

@@ -80,7 +80,7 @@ rust_library(
        ":failure_derive_build_script",
        "@raze__proc_macro2__1_0_24//:proc_macro2",
        "@raze__quote__1_0_8//:quote",
-        "@raze__syn__1_0_56//:syn",
+        "@raze__syn__1_0_57//:syn",
        "@raze__synstructure__0_12_4//:synstructure",
    ],
)

@@ -52,11 +52,11 @@ rust_library(
    # buildifier: leave-alone
    deps = [
        "@raze__fluent_langneg__0_13_0//:fluent_langneg",
-        "@raze__fluent_syntax__0_10_0//:fluent_syntax",
+        "@raze__fluent_syntax__0_10_1//:fluent_syntax",
        "@raze__intl_memoizer__0_5_0//:intl_memoizer",
        "@raze__intl_pluralrules__7_0_0//:intl_pluralrules",
        "@raze__rental__0_5_5//:rental",
-        "@raze__smallvec__1_5_1//:smallvec",
+        "@raze__smallvec__1_6_0//:smallvec",
        "@raze__unic_langid__0_9_0//:unic_langid",
    ],
)

cargo/remote/BUILD.fluent-syntax-0.10.1.bazel:

@@ -50,7 +50,7 @@ rust_binary(
        "cargo-raze",
        "manual",
    ],
-    version = "0.10.0",
+    version = "0.10.1",
    # buildifier: leave-alone
    deps = [
        # Binaries get an implicit dependency on their crate's lib
@@ -76,7 +76,7 @@ rust_binary(
        "cargo-raze",
        "manual",
    ],
-    version = "0.10.0",
+    version = "0.10.1",
    # buildifier: leave-alone
    deps = [
        # Binaries get an implicit dependency on their crate's lib
@@ -101,7 +101,7 @@ rust_library(
        "cargo-raze",
        "manual",
    ],
-    version = "0.10.0",
+    version = "0.10.1",
    # buildifier: leave-alone
    deps = [
    ],

@@ -54,6 +54,6 @@ rust_library(
    deps = [
        "@raze__proc_macro2__1_0_24//:proc_macro2",
        "@raze__quote__1_0_8//:quote",
-        "@raze__syn__1_0_56//:syn",
+        "@raze__syn__1_0_57//:syn",
    ],
)

cargo/remote/BUILD.getrandom-0.1.16.bazel:

@@ -53,7 +53,7 @@ cargo_build_script(
        "cargo-raze",
        "manual",
    ],
-    version = "0.1.15",
+    version = "0.1.16",
    visibility = ["//visibility:private"],
    deps = [
    ] + selects.with_or({
@@ -91,11 +91,11 @@ rust_library(
        "cargo-raze",
        "manual",
    ],
-    version = "0.1.15",
+    version = "0.1.16",
    # buildifier: leave-alone
    deps = [
        ":getrandom_build_script",
-        "@raze__cfg_if__0_1_10//:cfg_if",
+        "@raze__cfg_if__1_0_0//:cfg_if",
    ] + selects.with_or({
        # cfg(unix)
        (

@@ -51,6 +51,6 @@ rust_library(
    deps = [
        "@raze__proc_macro2__1_0_24//:proc_macro2",
        "@raze__quote__1_0_8//:quote",
-        "@raze__syn__1_0_56//:syn",
+        "@raze__syn__1_0_57//:syn",
    ],
)

@@ -51,6 +51,6 @@ rust_library(
    deps = [
        "@raze__proc_macro2__1_0_24//:proc_macro2",
        "@raze__quote__1_0_8//:quote",
-        "@raze__syn__1_0_56//:syn",
+        "@raze__syn__1_0_57//:syn",
    ],
)

@@ -42,7 +42,7 @@ rust_library(
    data = [],
    edition = "2018",
    proc_macro_deps = [
-        "@raze__derivative__2_1_1//:derivative",
+        "@raze__derivative__2_1_3//:derivative",
        "@raze__num_enum_derive__0_5_1//:num_enum_derive",
    ],
    rustc_flags = [

@@ -54,6 +54,6 @@ rust_library(
        "@raze__proc_macro2__1_0_24//:proc_macro2",
        "@raze__proc_macro_crate__0_1_5//:proc_macro_crate",
        "@raze__quote__1_0_8//:quote",
-        "@raze__syn__1_0_56//:syn",
+        "@raze__syn__1_0_57//:syn",
    ],
)

@@ -53,7 +53,7 @@ rust_library(
    deps = [
        "@raze__cfg_if__1_0_0//:cfg_if",
        "@raze__instant__0_1_9//:instant",
-        "@raze__smallvec__1_5_1//:smallvec",
+        "@raze__smallvec__1_6_0//:smallvec",
    ] + selects.with_or({
        # cfg(unix)
        (

@@ -80,6 +80,6 @@ rust_library(
        ":pin_project_internal_build_script",
        "@raze__proc_macro2__1_0_24//:proc_macro2",
        "@raze__quote__1_0_8//:quote",
-        "@raze__syn__1_0_56//:syn",
+        "@raze__syn__1_0_57//:syn",
    ],
)

@@ -51,6 +51,6 @@ rust_library(
    deps = [
        "@raze__proc_macro2__1_0_24//:proc_macro2",
        "@raze__quote__1_0_8//:quote",
-        "@raze__syn__1_0_56//:syn",
+        "@raze__syn__1_0_57//:syn",
    ],
)

@@ -53,6 +53,6 @@ rust_library(
        "@raze__itertools__0_9_0//:itertools",
        "@raze__proc_macro2__1_0_24//:proc_macro2",
        "@raze__quote__1_0_8//:quote",
-        "@raze__syn__1_0_56//:syn",
+        "@raze__syn__1_0_57//:syn",
    ],
)

@@ -51,6 +51,6 @@ rust_library(
    deps = [
        "@raze__pyo3_macros_backend__0_13_0//:pyo3_macros_backend",
        "@raze__quote__1_0_8//:quote",
-        "@raze__syn__1_0_56//:syn",
+        "@raze__syn__1_0_57//:syn",
    ],
)

@@ -51,6 +51,6 @@ rust_library(
    deps = [
        "@raze__proc_macro2__1_0_24//:proc_macro2",
        "@raze__quote__1_0_8//:quote",
-        "@raze__syn__1_0_56//:syn",
+        "@raze__syn__1_0_57//:syn",
    ],
)

@@ -46,7 +46,7 @@ rust_library(
    name = "rand",
    srcs = glob(["**/*.rs"]),
    aliases = {
-        "@raze__getrandom__0_1_15//:getrandom": "getrandom_package",
+        "@raze__getrandom__0_1_16//:getrandom": "getrandom_package",
    },
    crate_features = [
        "alloc",
@@ -70,7 +70,7 @@ rust_library(
    version = "0.7.3",
    # buildifier: leave-alone
    deps = [
-        "@raze__getrandom__0_1_15//:getrandom",
+        "@raze__getrandom__0_1_16//:getrandom",
        "@raze__rand_chacha__0_2_2//:rand_chacha",
        "@raze__rand_core__0_5_1//:rand_core",
    ] + selects.with_or({

@@ -52,6 +52,6 @@ rust_library(
    version = "0.5.1",
    # buildifier: leave-alone
    deps = [
-        "@raze__getrandom__0_1_15//:getrandom",
+        "@raze__getrandom__0_1_16//:getrandom",
    ],
)

@@ -52,7 +52,7 @@ rust_library(
    version = "0.3.5",
    # buildifier: leave-alone
    deps = [
-        "@raze__getrandom__0_1_15//:getrandom",
+        "@raze__getrandom__0_1_16//:getrandom",
        "@raze__redox_syscall__0_1_57//:redox_syscall",
        "@raze__rust_argon2__0_8_3//:rust_argon2",
    ],

@@ -51,6 +51,6 @@ rust_library(
    deps = [
        "@raze__proc_macro2__1_0_24//:proc_macro2",
        "@raze__quote__1_0_8//:quote",
-        "@raze__syn__1_0_56//:syn",
+        "@raze__syn__1_0_57//:syn",
    ],
)

@@ -64,7 +64,7 @@ rust_library(
        "@raze__hashlink__0_6_0//:hashlink",
        "@raze__libsqlite3_sys__0_20_1//:libsqlite3_sys",
        "@raze__memchr__2_3_4//:memchr",
-        "@raze__smallvec__1_5_1//:smallvec",
+        "@raze__smallvec__1_6_0//:smallvec",
    ],
)

@@ -82,6 +82,6 @@ rust_library(
        ":serde_derive_build_script",
        "@raze__proc_macro2__1_0_24//:proc_macro2",
        "@raze__quote__1_0_8//:quote",
-        "@raze__syn__1_0_56//:syn",
+        "@raze__syn__1_0_57//:syn",
    ],
)

@@ -51,7 +51,7 @@ rust_library(
    deps = [
        "@raze__proc_macro2__1_0_24//:proc_macro2",
        "@raze__quote__1_0_8//:quote",
-        "@raze__syn__1_0_56//:syn",
+        "@raze__syn__1_0_57//:syn",
    ],
)

@@ -51,6 +51,6 @@ rust_library(
    deps = [
        "@raze__proc_macro2__1_0_24//:proc_macro2",
        "@raze__quote__1_0_8//:quote",
-        "@raze__syn__1_0_56//:syn",
+        "@raze__syn__1_0_57//:syn",
    ],
)

cargo/remote/BUILD.smallvec-1.6.0.bazel:

@@ -48,7 +48,7 @@ rust_library(
        "cargo-raze",
        "manual",
    ],
-    version = "1.5.1",
+    version = "1.6.0",
    # buildifier: leave-alone
    deps = [
    ],

cargo/remote/BUILD.syn-1.0.57.bazel:

@@ -64,7 +64,7 @@ cargo_build_script(
        "cargo-raze",
        "manual",
    ],
-    version = "1.0.56",
+    version = "1.0.57",
    visibility = ["//visibility:private"],
    deps = [
    ],
@@ -102,7 +102,7 @@ rust_library(
        "cargo-raze",
        "manual",
    ],
-    version = "1.0.56",
+    version = "1.0.57",
    # buildifier: leave-alone
    deps = [
        ":syn_build_script",

@@ -53,7 +53,7 @@ rust_library(
    deps = [
        "@raze__proc_macro2__1_0_24//:proc_macro2",
        "@raze__quote__1_0_8//:quote",
-        "@raze__syn__1_0_56//:syn",
+        "@raze__syn__1_0_57//:syn",
        "@raze__unicode_xid__0_2_1//:unicode_xid",
    ],
)

@@ -51,6 +51,6 @@ rust_library(
    deps = [
        "@raze__proc_macro2__1_0_24//:proc_macro2",
        "@raze__quote__1_0_8//:quote",
-        "@raze__syn__1_0_56//:syn",
+        "@raze__syn__1_0_57//:syn",
    ],
)

@@ -53,7 +53,7 @@ rust_library(
    # buildifier: leave-alone
    deps = [
        "@raze__quote__1_0_8//:quote",
-        "@raze__syn__1_0_56//:syn",
+        "@raze__syn__1_0_57//:syn",
        "@raze__unic_langid_impl__0_9_0//:unic_langid_impl",
    ],
)

@@ -55,7 +55,7 @@ rust_library(
        "@raze__log__0_4_11//:log",
        "@raze__proc_macro2__1_0_24//:proc_macro2",
        "@raze__quote__1_0_8//:quote",
-        "@raze__syn__1_0_56//:syn",
+        "@raze__syn__1_0_57//:syn",
        "@raze__wasm_bindgen_shared__0_2_69//:wasm_bindgen_shared",
    ],
)

@@ -52,7 +52,7 @@ rust_library(
    deps = [
        "@raze__proc_macro2__1_0_24//:proc_macro2",
        "@raze__quote__1_0_8//:quote",
-        "@raze__syn__1_0_56//:syn",
+        "@raze__syn__1_0_57//:syn",
        "@raze__wasm_bindgen_backend__0_2_69//:wasm_bindgen_backend",
        "@raze__wasm_bindgen_shared__0_2_69//:wasm_bindgen_shared",
    ],
docs/syncserver.md (new file, +103 lines):

# Local sync server

A local sync server is bundled with Anki. If you cannot or do not wish to
use AnkiWeb, you can run the server on a machine on your local network.

Things to be aware of:

- Media syncing is not currently supported. You will either need to disable
  syncing of sounds and images in the preferences screen, sync your media via
  AnkiWeb, or use some other solution.
- AnkiMobile does not yet provide an option for using a local sync server,
  so for now this will only be usable with the computer version of Anki, and
  AnkiDroid.
- This code is partly new, and while it has had some testing, it's possible
  something has been missed. Please make backups, and report any bugs you run
  into.
- The server runs over an unencrypted HTTP connection and does not require
  authentication, so it is only suitable for use on a private network.
- This is an advanced feature, targeted at users who are comfortable with
  networking and the command line. If you use this, the expectation is you
  can resolve any setup/network/firewall issues you run into yourself, and
  use of this is entirely at your own risk.

## From source

If you run Anki from git, you can run a sync server with:

```
./scripts/runopt --syncserver
```

## From a packaged build

From 2.1.39beta1+, the sync server is included in the packaged binaries.

On Windows, in a cmd.exe session:

```
"\program files\anki\anki-console.exe" --syncserver
```

On macOS, in Terminal.app:

```
/Applications/Anki.app/Contents/MacOS/AnkiMac --syncserver
```

On Linux:

```
anki --syncserver
```

## Without Qt dependencies

You can run the server without installing the GUI portion of Anki. Once Anki
2.1.39 is released, the following will work:

```
pip install anki[syncserver]
python -m anki.syncserver
```

## Server setup

The server needs to store a copy of your collection in a folder.
By default it is ~/.syncserver; you can change this by defining
a `FOLDER` environment variable. This should not be the same location
as your normal Anki data folder.

You can also define `HOST` and `PORT`.

## Client setup

When the server starts, it will print the address it is listening on.
You need to set an environment variable before starting your Anki
clients to tell them where to connect to. Eg:

```
set SYNC_ENDPOINT="http://10.0.0.5:8080/sync/"
anki
```

Currently any username and password will be accepted. If you wish to
keep using AnkiWeb for media, sync once with AnkiWeb first, then switch
to your local endpoint - collection syncs will be local, and media syncs
will continue to go to AnkiWeb.

## Contributing

Authentication shouldn't be too hard to add - login() and request() in
http_client.rs can be used as a reference. A PR that accepts a password in an
env var, and generates a stable hkey based on it would be welcome.

Once that is done, basic multi-profile support could be implemented by moving
the col object into an array or dict, and fetching the relevant collection based
on the user's authentication.

Because this server is bundled with Anki, simplicity is a design goal - it is
targeted at individual/family use, only makes use of Python libraries the GUI is
already using, and does not require a configuration file. PRs that deviate from
this are less likely to be merged, so please consider reaching out first if you
are thinking of starting work on a larger change.
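As a quick illustration of the setup the new document describes, here is a minimal sketch (not part of the commit) that launches the bundled server with the documented `FOLDER`/`PORT` environment variables; the data folder path is an arbitrary example:

```python
# Minimal sketch: start the bundled sync server with custom settings.
# FOLDER/PORT are the environment variables documented above; the
# folder path used here is an arbitrary example, not a required value.
import os
import subprocess
import sys

env = dict(os.environ)
env["FOLDER"] = os.path.expanduser("~/anki-sync-data")  # example location
env["PORT"] = "8080"

# Equivalent to running `python -m anki.syncserver` from a shell.
subprocess.run([sys.executable, "-m", "anki.syncserver"], env=env, check=True)
```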
@@ -18,7 +18,8 @@ search-invalid-flag = Invalid search:<br><code>flag:</code> must be followed by
 search-invalid-added = Invalid search:<br><code>added:</code> must be followed by a positive number of days.
 search-invalid-edited = Invalid search:<br><code>edited:</code> must be followed by a positive number of days.
 search-invalid-rated-days = Invalid search:<br><code>rated:</code> must be followed by a positive number of days.
-search-invalid-rated-ease = Invalid search:<br><code>rated:{ $val }:</code> must be followed by <code>0</code> (rescheduled), <code>1</code> (again), <code>2</code> (hard), <code>3</code> (good) or <code>4</code> (easy).
+search-invalid-rated-ease = Invalid search:<br><code>rated:{ $val }:</code> must be followed by <code>1</code> (again), <code>2</code> (hard), <code>3</code> (good) or <code>4</code> (easy).
+search-invalid-resched = Invalid search:<br><code>resched:</code> must be followed by a positive number of days.
 search-invalid-dupe-mid = Invalid search:<br><code>dupe:</code> must be followed by a note type id, a comma and then arbitrary text.
 search-invalid-dupe-text = Invalid search:<br><code>dupe:</code> must be followed by a note type id, a comma and then arbitrary text.
 search-invalid-prop-property = Invalid search:<br><code>prop:</code> must be followed by one of the predefined card properties: <code>ivl</code> (interval), <code>due</code>, <code>reps</code> (repetitions), <code>lapses</code>, <code>ease</code> or <code>pos</code> (position).
@@ -117,16 +117,16 @@ def update_repos_bzl():
         out.append(line)
     open(path, "w").writelines(out)

-    commit_if_changed(root)
+    commit_if_changed(root, update_label="translations")


-def commit_if_changed(folder: str):
+def commit_if_changed(folder: str, update_label: str):
     status = subprocess.run(["git", "diff", "--exit-code"], cwd=folder, check=False)
     if status.returncode == 0:
         # no changes
         return
     subprocess.run(
-        ["git", "commit", "-a", "-m", "update translations"], cwd=folder, check=True
+        ["git", "commit", "-a", "-m", "update " + update_label], cwd=folder, check=True
     )

@@ -147,7 +147,7 @@ def update_ftl_templates():
         ],
         check=True,
     )
-    commit_if_changed(module.folder())
+    commit_if_changed(module.folder(), update_label="templates")


 def push_i18n_changes():
pylib/anki/BUILD.bazel:

@@ -96,6 +96,8 @@ py_library(
         requirement("distro"),
         requirement("protobuf"),
         requirement("requests"),
+        requirement("flask"),
+        requirement("waitress"),
     ] + orjson_if_available(),
 )

@@ -110,6 +112,12 @@ py_wheel(
     abi = "abi3",
     description_file = "wheel_description.txt",
     distribution = "anki",
+    extra_requires = {
+        "syncserver": [
+            "flask",
+            "waitress",
+        ],
+    },
     platform = select({
         "//platforms:windows_x86_64": "win_amd64",
         "//platforms:macos_x86_64": "macosx_10_7_x86_64",
pylib/anki/collection.py:

@@ -135,14 +135,6 @@ class Collection:

         self._loadScheduler()

-    # the sync code uses this to send the local timezone to AnkiWeb
-    def localOffset(self) -> Optional[int]:
-        "Minutes west of UTC. Only applies to V2 scheduler."
-        if isinstance(self.sched, V1Scheduler):
-            return None
-        else:
-            return self.backend.local_minutes_west(intTime())
-
     # DB-related
     ##########################################################################

@@ -632,37 +624,6 @@ table.review-log {{ {revlog_style} }}
     # DB maintenance
     ##########################################################################

-    def basicCheck(self) -> bool:
-        "Basic integrity check for syncing. True if ok."
-        # cards without notes
-        if self.db.scalar(
-            """
-select 1 from cards where nid not in (select id from notes) limit 1"""
-        ):
-            return False
-        # notes without cards or models
-        if self.db.scalar(
-            """
-select 1 from notes where id not in (select distinct nid from cards)
-or mid not in %s limit 1"""
-            % ids2str(self.models.ids())
-        ):
-            return False
-        # invalid ords
-        for m in self.models.all():
-            # ignore clozes
-            if m["type"] != MODEL_STD:
-                continue
-            if self.db.scalar(
-                """
-select 1 from cards where ord not in %s and nid in (
-select id from notes where mid = ?) limit 1"""
-                % ids2str([t["ord"] for t in m["tmpls"]]),
-                m["id"],
-            ):
-                return False
-        return True
-
     def fixIntegrity(self) -> Tuple[str, bool]:
         """Fix possible problems and rebuild caches.
pylib/anki/rsbackend.py:

@@ -48,6 +48,8 @@ DeckTreeNode = pb.DeckTreeNode
 StockNoteType = pb.StockNoteType
 FilterToSearchIn = pb.FilterToSearchIn
+NamedFilter = pb.FilterToSearchIn.NamedFilter
+DupeIn = pb.FilterToSearchIn.DupeIn
 BackendNoteTypeID = pb.NoteTypeID
 ConcatSeparator = pb.ConcatenateSearchesIn.Separator
 SyncAuth = pb.SyncAuth
 SyncOutput = pb.SyncCollectionOut

@@ -274,10 +276,3 @@ def translate_string_in(
     else:
         args[k] = pb.TranslateArgValue(number=v)
     return pb.TranslateStringIn(key=key, args=args)
-
-
-# temporarily force logging of media handling
-if "RUST_LOG" not in os.environ:
-    os.environ[
-        "RUST_LOG"
-    ] = "warn,anki::media=debug,anki::sync=debug,anki::dbcheck=debug"
@@ -1,5 +1,8 @@
 # Copyright: Ankitects Pty Ltd and contributors
 # License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
+#
+# Legacy attributes some add-ons may be using
+#

 from .httpclient import HttpClient
pylib/anki/syncserver/__init__.py (new file, +193 lines):

# Copyright: Ankitects Pty Ltd and contributors
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
#
# Please see /docs/syncserver.md
#

from __future__ import annotations

import gzip
import os
import socket
import sys
import time
from io import BytesIO
from tempfile import NamedTemporaryFile
from typing import Optional

try:
    import flask
    from waitress.server import create_server
except ImportError as e:
    print(e, "- to use the server, 'pip install anki[syncserver]'")
    sys.exit(1)


from flask import Response

from anki import Collection
from anki.backend_pb2 import SyncServerMethodIn

Method = SyncServerMethodIn.Method  # pylint: disable=no-member

app = flask.Flask(__name__)
col: Collection
trace = os.getenv("TRACE")


def get_request_data() -> bytes:
    buf = BytesIO()
    flask.request.files["data"].save(buf)
    buf.seek(0)
    zip = gzip.GzipFile(mode="rb", fileobj=buf)
    return zip.read()


def get_request_data_into_file() -> bytes:
    "Returns the utf8 path to the resulting file."
    # this could be optimized to stream the data into a file
    # in the future
    data = get_request_data()
    tempobj = NamedTemporaryFile(dir=folder(), delete=False)
    tempobj.write(data)
    tempobj.close()
    return tempobj.name.encode("utf8")


def handle_sync_request(method_str: str) -> Response:
    method = get_method(method_str)
    if method is None:
        raise Exception(f"unknown method: {method_str}")

    if method == Method.FULL_UPLOAD:
        data = get_request_data_into_file()
    else:
        data = get_request_data()
        if trace:
            print("-->", data)

    full = method in (Method.FULL_UPLOAD, Method.FULL_DOWNLOAD)
    if full:
        col.close_for_full_sync()
    try:
        outdata = col.backend.sync_server_method(method=method, data=data)
    except Exception as e:
        if method == Method.META:
            # if parallel syncing requests come in, block them
            print("exception in meta", e)
            return flask.make_response("Conflict", 409)
        else:
            raise
    finally:
        if full:
            after_full_sync()

    resp = None
    if method == Method.FULL_UPLOAD:
        # upload call expects a raw string literal returned
        outdata = b"OK"
    elif method == Method.FULL_DOWNLOAD:
        path = outdata.decode("utf8")

        def stream_reply():
            with open(path, "rb") as f:
                while chunk := f.read(16 * 1024):
                    yield chunk
            os.unlink(path)

        resp = Response(stream_reply())
    else:
        if trace:
            print("<--", outdata)

    if not resp:
        resp = flask.make_response(outdata)
    resp.headers["Content-Type"] = "application/binary"
    return resp


def after_full_sync():
    # the server methods do not reopen the collection after a full sync,
    # so we need to
    col.reopen(after_full_sync=False)
    col.db.rollback()


def get_method(
    method_str: str,
) -> Optional[SyncServerMethodIn.MethodValue]:  # pylint: disable=no-member
    s = method_str
    if s == "hostKey":
        return Method.HOST_KEY
    elif s == "meta":
        return Method.META
    elif s == "start":
        return Method.START
    elif s == "applyGraves":
        return Method.APPLY_GRAVES
    elif s == "applyChanges":
        return Method.APPLY_CHANGES
    elif s == "chunk":
        return Method.CHUNK
    elif s == "applyChunk":
        return Method.APPLY_CHUNK
    elif s == "sanityCheck2":
        return Method.SANITY_CHECK
    elif s == "finish":
        return Method.FINISH
    elif s == "abort":
        return Method.ABORT
    elif s == "upload":
        return Method.FULL_UPLOAD
    elif s == "download":
        return Method.FULL_DOWNLOAD
    else:
        return None


@app.route("/<path:pathin>", methods=["POST"])
def handle_request(pathin: str):
    path = pathin
    print(int(time.time()), flask.request.remote_addr, path)

    if path.startswith("sync/"):
        return handle_sync_request(path.split("/", maxsplit=1)[1])


def folder():
    folder = os.getenv("FOLDER", os.path.expanduser("~/.syncserver"))
    if not os.path.exists(folder):
        print("creating", folder)
        os.mkdir(folder)
    return folder


def col_path():
    return os.path.join(folder(), "collection.server.anki2")


def serve():
    global col

    col = Collection(col_path(), server=True)
    # don't hold an outer transaction open
    col.db.rollback()
    host = os.getenv("HOST", "0.0.0.0")
    port = int(os.getenv("PORT", "8080"))

    server = create_server(
        app,
        host=host,
        port=port,
        clear_untrusted_proxy_headers=True,
    )

    effective_port = server.effective_port  # type: ignore
    print(f"Sync server listening on http://{host}:{effective_port}/sync/")
    if host == "0.0.0.0":
        ip = socket.gethostbyname(socket.gethostname())
        print(f"Replace 0.0.0.0 with your machine's IP address (perhaps {ip})")
    print(
        "For more info, see https://github.com/ankitects/anki/blob/master/docs/syncserver.md"
    )
    server.run()
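Grounded in `get_request_data()` above, the server expects a multipart POST whose `data` file field carries a gzip-compressed payload. The sketch below is illustrative only: the endpoint address is a local-testing assumption, and the empty JSON body is a placeholder rather than a valid sync payload.

```python
# Hedged sketch of the wire format handle_request()/get_request_data()
# accept: a multipart POST with a gzipped "data" field. The URL and the
# request body below are illustrative assumptions, not a working client.
import gzip
import io

import requests

payload = gzip.compress(b"{}")  # placeholder payload, not a real meta body
resp = requests.post(
    "http://127.0.0.1:8080/sync/meta",
    files={"data": ("data", io.BytesIO(payload))},
)
print(resp.status_code, resp.headers.get("Content-Type"))
```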
pylib/anki/syncserver/__main__.py (new file, +6 lines):

# Copyright: Ankitects Pty Ltd and contributors
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html

from anki.syncserver import serve

serve()
@@ -40,13 +40,11 @@ fn want_release_gil(method: u32) -> bool {
             | BackendMethod::RenderExistingCard
             | BackendMethod::RenderUncommittedCard
             | BackendMethod::StripAVTags
-            | BackendMethod::LocalMinutesWest
             | BackendMethod::SchedTimingToday
             | BackendMethod::AddOrUpdateDeckLegacy
             | BackendMethod::NewDeckLegacy
             | BackendMethod::NewDeckConfigLegacy
             | BackendMethod::GetStockNotetypeLegacy
-            | BackendMethod::SetLocalMinutesWest
             | BackendMethod::StudiedToday
             | BackendMethod::TranslateString
             | BackendMethod::FormatTimespan
aqt/__init__.py:

@@ -316,9 +316,16 @@ def parseArgs(argv):
     parser.add_argument("-b", "--base", help="path to base folder", default="")
     parser.add_argument("-p", "--profile", help="profile name to load", default="")
     parser.add_argument("-l", "--lang", help="interface language (en, de, etc)")
-    parser.add_argument("-v", "--version", help="print the Anki version and exit")
     parser.add_argument(
-        "-s", "--safemode", help="disable add-ons and automatic syncing"
+        "-v", "--version", help="print the Anki version and exit", action="store_true"
+    )
+    parser.add_argument(
+        "--safemode", help="disable add-ons and automatic syncing", action="store_true"
     )
+    parser.add_argument(
+        "--syncserver",
+        help="skip GUI and start a local sync server",
+        action="store_true",
+    )
     return parser.parse_known_args(argv[1:])

@@ -433,7 +440,12 @@ def _run(argv=None, exec=True):
     opts, args = parseArgs(argv)

     if opts.version:
-        print(f"Anki version '{appVersion}'")
+        print(f"Anki {appVersion}")
         return
+    elif opts.syncserver:
+        from anki.syncserver import serve
+
+        serve()
+        return

     if PROFILE_CODE:
aqt/browser.py:

@@ -21,8 +21,10 @@ from anki.lang import without_unicode_isolation
 from anki.models import NoteType
 from anki.notes import Note
 from anki.rsbackend import (
+    BackendNoteTypeID,
     ConcatSeparator,
     DeckTreeNode,
+    DupeIn,
     FilterToSearchIn,
     InvalidInput,
     NamedFilter,

@@ -2016,6 +2018,17 @@ where id in %s"""
     # Edit: finding dupes
     ######################################################################

+    # filter called by the editor
+    def search_dupe(self, mid: int, text: str):
+        self.form.searchEdit.lineEdit().setText(
+            self.col.backend.filter_to_search(
+                FilterToSearchIn(
+                    dupe=DupeIn(mid=BackendNoteTypeID(ntid=mid), text=text)
+                )
+            )
+        )
+        self.onSearchActivated()
+
     def onFindDupes(self):
         self.editor.saveNow(self._onFindDupes)
aqt/editor.py:

@@ -537,12 +537,8 @@ class Editor:
         self.web.eval("setBackgrounds(%s);" % json.dumps(cols))

     def showDupes(self):
-        contents = self.note.fields[0].replace('"', r"\"")
         browser = aqt.dialogs.open("Browser", self.mw)
-        browser.form.searchEdit.lineEdit().setText(
-            '"dupe:%s,%s"' % (self.note.model()["id"], contents)
-        )
-        browser.onSearchActivated()
+        browser.search_dupe(self.note.model()["id"], self.note.fields[0])

     def fieldsAreBlank(self, previousNote=None):
         if not self.note:
aqt/sync.py:

@@ -122,10 +122,6 @@ def sync_collection(mw: aqt.main.AnkiQt, on_done: Callable[[], None]) -> None:
     else:
         full_sync(mw, out, on_done)

-    if not mw.col.basicCheck():
-        showWarning("Please use Tools>Check Database")
-        return on_done()
-
     mw.col.save(trx=False)
     mw.taskman.with_progress(
         lambda: mw.col.backend.sync_collection(auth),
repos.bzl:

@@ -132,8 +132,8 @@ def register_repos():
    ################

    core_i18n_repo = "anki-core-i18n"
-    core_i18n_commit = "3787f06947296f7ceee9c36000d9bf2176fbf8cf"
-    core_i18n_zip_csum = "ff78d92f4b88821778c7b7e50baab705dc2e6fef46cd355fcd8a527f248bf9d6"
+    core_i18n_commit = "e54034616bf0cdfa158ce78b1824b38144df6857"
+    core_i18n_zip_csum = "3452b4b35d4058ff588f84644409ec204cbf58ae0c42974a713b21c2f00b40c9"

    qtftl_i18n_repo = "anki-desktop-ftl"
    qtftl_i18n_commit = "881103198f1e3603c297bf449acb819f027442e6"
rslib/.clang-format (new file, +1 line):

BasedOnStyle: google
rslib/BUILD.bazel:

@@ -65,6 +65,7 @@ rust_library(
    proc_macro_deps = [
        "//rslib/cargo:serde_derive",
        "//rslib/cargo:serde_repr",
+        "//rslib/cargo:async_trait",
    ],
    rustc_env = _anki_rustc_env,
    visibility = ["//visibility:public"],
rslib/Cargo.toml:

@@ -74,3 +74,4 @@ unic-langid = { version = "0.9", features = ["macros"] }
 unicode-normalization = "0.1.13"
 utime = "0.3.1"
 zip = { version = "0.5.6", default-features = false, features = ["deflate", "time"] }
+async-trait = "0.1.42"
rslib/backend.proto:

@@ -95,8 +95,6 @@ service BackendService {

  // scheduling

-  rpc LocalMinutesWest(Int64) returns (Int32);
-  rpc SetLocalMinutesWest(Int32) returns (Empty);
  rpc SchedTimingToday(Empty) returns (SchedTimingTodayOut);
  rpc StudiedToday(Empty) returns (String);
  rpc StudiedTodayMessage(StudiedTodayMessageIn) returns (String);
@@ -198,6 +196,7 @@ service BackendService {
  rpc SyncCollection(SyncAuth) returns (SyncCollectionOut);
  rpc FullUpload(SyncAuth) returns (Empty);
  rpc FullDownload(SyncAuth) returns (Empty);
+  rpc SyncServerMethod(SyncServerMethodIn) returns (Json);

  // translation/messages

@@ -508,6 +507,7 @@ message SyncError {
    RESYNC_REQUIRED = 7;
    CLOCK_INCORRECT = 8;
    DATABASE_CHECK_REQUIRED = 9;
+    SYNC_NOT_STARTED = 10;
  }
  SyncErrorKind kind = 1;
 }
@@ -775,12 +775,17 @@ message FilterToSearchIn {
    NO_FLAG = 15;
    ANY_FLAG = 16;
  }
+  message DupeIn {
+    NoteTypeID mid = 1;
+    string text = 2;
+  }
  oneof filter {
    NamedFilter name = 1;
    string tag = 2;
    string deck = 3;
    string note = 4;
    uint32 template = 5;
+    DupeIn dupe = 6;
  }
 }

@@ -1015,6 +1020,26 @@ message SyncAuth {
  uint32 host_number = 2;
 }

+message SyncServerMethodIn {
+  enum Method {
+    HOST_KEY = 0;
+    META = 1;
+    START = 2;
+    APPLY_GRAVES = 3;
+    APPLY_CHANGES = 4;
+    CHUNK = 5;
+    APPLY_CHUNK = 6;
+    SANITY_CHECK = 7;
+    FINISH = 8;
+    ABORT = 9;
+    // caller must reopen after these two are called
+    FULL_UPLOAD = 10;
+    FULL_DOWNLOAD = 11;
+  }
+  Method method = 1;
+  bytes data = 2;
+}
+
 message RemoveNotesIn {
  repeated int64 note_ids = 1;
  repeated int64 card_ids = 2;
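For orientation, a small sketch of driving the new rpc from Python via the generated protobuf bindings (the `anki.backend_pb2` module name and the `sync_server_method` wrapper mirror the imports and call in `syncserver/__init__.py` above; the empty payload is a placeholder, not a valid sync body):

```python
# Sketch: constructing the new SyncServerMethodIn message from Python.
# Module and wrapper names are taken from syncserver/__init__.py above;
# the empty data payload is a placeholder, not a real request body.
from anki.backend_pb2 import SyncServerMethodIn

Method = SyncServerMethodIn.Method  # pylint: disable=no-member
msg = SyncServerMethodIn(method=Method.META, data=b"")
# The server then forwards it as:
# col.backend.sync_server_method(method=msg.method, data=msg.data)
```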
rslib/cargo/BUILD.bazel:

@@ -30,6 +30,15 @@ alias(
    ],
)

+alias(
+    name = "async_trait",
+    actual = "@raze__async_trait__0_1_42//:async_trait",
+    tags = [
+        "cargo-raze",
+        "manual",
+    ],
+)
+
alias(
    name = "blake3",
    actual = "@raze__blake3__0_3_7//:blake3",
@@ -104,7 +113,7 @@ alias(

alias(
    name = "fluent_syntax",
-    actual = "@raze__fluent_syntax__0_10_0//:fluent_syntax",
+    actual = "@raze__fluent_syntax__0_10_1//:fluent_syntax",
    tags = [
        "cargo-raze",
        "manual",
rslib/src/backend/http_sync_server.rs (new file, +211 lines):

// Copyright: Ankitects Pty Ltd and contributors
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html

use std::{path::PathBuf, sync::MutexGuard};

use tokio::runtime::Runtime;

use super::{Backend, BackendState};
use crate::{
    err::SyncErrorKind,
    prelude::*,
    sync::{
        http::{
            ApplyChangesIn, ApplyChunkIn, ApplyGravesIn, HostKeyIn, HostKeyOut, MetaIn,
            SanityCheckIn, StartIn, SyncRequest,
        },
        Chunk, Graves, LocalServer, SanityCheckOut, SanityCheckStatus, SyncMeta, SyncServer,
        UnchunkedChanges, SYNC_VERSION_MAX, SYNC_VERSION_MIN,
    },
};

impl Backend {
    fn with_sync_server<F, T>(&self, func: F) -> Result<T>
    where
        F: FnOnce(&mut LocalServer) -> Result<T>,
    {
        let mut state_guard = self.state.lock().unwrap();
        let out = func(
            state_guard
                .http_sync_server
                .as_mut()
                .ok_or_else(|| AnkiError::SyncError {
                    kind: SyncErrorKind::SyncNotStarted,
                    info: Default::default(),
                })?,
        );
        if out.is_err() {
            self.abort_and_restore_collection(Some(state_guard))
        }
        out
    }

    /// Gives out a dummy hkey - auth should be implemented at a higher layer.
    fn host_key(&self, _input: HostKeyIn) -> Result<HostKeyOut> {
        Ok(HostKeyOut {
            key: "unimplemented".into(),
        })
    }

    fn meta(&self, input: MetaIn) -> Result<SyncMeta> {
        if input.sync_version < SYNC_VERSION_MIN || input.sync_version > SYNC_VERSION_MAX {
            return Ok(SyncMeta {
                server_message: "Your Anki version is either too old, or too new.".into(),
                should_continue: false,
                ..Default::default()
            });
        }
        let server = self.col_into_server()?;
        let mut rt = Runtime::new().unwrap();
        let meta = rt.block_on(server.meta())?;
        self.server_into_col(server);

        Ok(meta)
    }

    /// Takes the collection from the backend, places it into a server, and returns it.
    fn col_into_server(&self) -> Result<LocalServer> {
        self.col
            .lock()
            .unwrap()
            .take()
            .map(LocalServer::new)
            .ok_or(AnkiError::CollectionNotOpen)
    }

    fn server_into_col(&self, server: LocalServer) {
        let col = server.into_col();
        let mut col_guard = self.col.lock().unwrap();
        assert!(col_guard.replace(col).is_none());
    }

    fn take_server(&self, state_guard: Option<MutexGuard<BackendState>>) -> Result<LocalServer> {
        let mut state_guard = state_guard.unwrap_or_else(|| self.state.lock().unwrap());
        state_guard
            .http_sync_server
            .take()
            .ok_or_else(|| AnkiError::SyncError {
                kind: SyncErrorKind::SyncNotStarted,
                info: String::new(),
            })
    }

    fn start(&self, input: StartIn) -> Result<Graves> {
        // place col into new server
        let server = self.col_into_server()?;
        let mut state_guard = self.state.lock().unwrap();
        assert!(state_guard.http_sync_server.replace(server).is_none());
        drop(state_guard);

        self.with_sync_server(|server| {
            let mut rt = Runtime::new().unwrap();
            rt.block_on(server.start(input.client_usn, input.local_is_newer))
        })
    }

    fn apply_graves(&self, input: ApplyGravesIn) -> Result<()> {
        self.with_sync_server(|server| {
            let mut rt = Runtime::new().unwrap();
            rt.block_on(server.apply_graves(input.chunk))
        })
    }

    fn apply_changes(&self, input: ApplyChangesIn) -> Result<UnchunkedChanges> {
        self.with_sync_server(|server| {
            let mut rt = Runtime::new().unwrap();
            rt.block_on(server.apply_changes(input.changes))
        })
    }

    fn chunk(&self) -> Result<Chunk> {
        self.with_sync_server(|server| {
            let mut rt = Runtime::new().unwrap();
            rt.block_on(server.chunk())
        })
    }

    fn apply_chunk(&self, input: ApplyChunkIn) -> Result<()> {
        self.with_sync_server(|server| {
            let mut rt = Runtime::new().unwrap();
            rt.block_on(server.apply_chunk(input.chunk))
        })
    }

    fn sanity_check(&self, input: SanityCheckIn) -> Result<SanityCheckOut> {
        self.with_sync_server(|server| {
            let mut rt = Runtime::new().unwrap();
            rt.block_on(server.sanity_check(input.client))
        })
        .map(|out| {
            if out.status != SanityCheckStatus::Ok {
                // sanity check failures are an implicit abort
                self.abort_and_restore_collection(None);
            }
            out
        })
    }

    fn finish(&self) -> Result<TimestampMillis> {
        let out = self.with_sync_server(|server| {
            let mut rt = Runtime::new().unwrap();
            rt.block_on(server.finish())
        });
        self.server_into_col(self.take_server(None)?);
        out
    }

    fn abort(&self) -> Result<()> {
        self.abort_and_restore_collection(None);
        Ok(())
    }

    fn abort_and_restore_collection(&self, state_guard: Option<MutexGuard<BackendState>>) {
        if let Ok(mut server) = self.take_server(state_guard) {
            let mut rt = Runtime::new().unwrap();
            // attempt to roll back
            if let Err(abort_err) = rt.block_on(server.abort()) {
                println!("abort failed: {:?}", abort_err);
            }
            self.server_into_col(server);
        }
    }

    /// Caller must re-open collection after this request. Provided file will be
    /// consumed.
    fn upload(&self, input: PathBuf) -> Result<()> {
        // spool input into a file
        let server = Box::new(self.col_into_server()?);
        // then process upload
        let mut rt = Runtime::new().unwrap();
        rt.block_on(server.full_upload(&input, true))
    }

    /// Caller must re-open collection after this request, and is responsible
    /// for cleaning up the returned file.
    fn download(&self) -> Result<Vec<u8>> {
        let server = Box::new(self.col_into_server()?);
        let mut rt = Runtime::new().unwrap();
        let file = rt.block_on(server.full_download())?;
        let path = file.into_temp_path().keep()?;
        Ok(path.to_str().expect("path was not in utf8").into())
    }

    pub(crate) fn sync_server_method_inner(&self, req: SyncRequest) -> Result<Vec<u8>> {
        use serde_json::to_vec;
        match req {
            SyncRequest::HostKey(v) => to_vec(&self.host_key(v)?),
            SyncRequest::Meta(v) => to_vec(&self.meta(v)?),
            SyncRequest::Start(v) => to_vec(&self.start(v)?),
            SyncRequest::ApplyGraves(v) => to_vec(&self.apply_graves(v)?),
            SyncRequest::ApplyChanges(v) => to_vec(&self.apply_changes(v)?),
            SyncRequest::Chunk => to_vec(&self.chunk()?),
            SyncRequest::ApplyChunk(v) => to_vec(&self.apply_chunk(v)?),
            SyncRequest::SanityCheck(v) => to_vec(&self.sanity_check(v)?),
            SyncRequest::Finish => to_vec(&self.finish()?),
            SyncRequest::Abort => to_vec(&self.abort()?),
            SyncRequest::FullUpload(v) => to_vec(&self.upload(v)?),
            SyncRequest::FullDownload => return self.download(),
        }
        .map_err(Into::into)
    }
}
@@ -32,17 +32,17 @@ use crate::{
        all_stock_notetypes, CardTemplateSchema11, NoteType, NoteTypeID, NoteTypeSchema11,
        RenderCardOutput,
    },
    sched::cutoff::local_minutes_west_for_stamp,
    sched::new::NewCardSortOrder,
    sched::timespan::{answer_button_time, time_span},
    search::{
        concatenate_searches, negate_search, normalize_search, replace_search_term, write_nodes,
        BoolSeparator, Node, SearchNode, SortMode, StateKind, TemplateKind,
        BoolSeparator, EaseKind, Node, SearchNode, SortMode, StateKind, TemplateKind,
    },
    stats::studied_today,
    sync::{
        get_remote_sync_meta, sync_abort, sync_login, FullSyncProgress, NormalSyncProgress,
        SyncActionRequired, SyncAuth, SyncMeta, SyncOutput, SyncStage,
        get_remote_sync_meta, http::SyncRequest, sync_abort, sync_login, FullSyncProgress,
        LocalServer, NormalSyncProgress, SyncActionRequired, SyncAuth, SyncMeta, SyncOutput,
        SyncStage,
    },
    template::RenderedNode,
    text::{escape_anki_wildcards, extract_av_tags, strip_av_tags, AVTag},
@@ -66,6 +66,7 @@ use std::{
use tokio::runtime::{self, Runtime};

mod dbproxy;
mod http_sync_server;

struct ThrottlingProgressHandler {
    state: Arc<Mutex<ProgressState>>,
@@ -112,6 +113,7 @@ pub struct Backend {
struct BackendState {
    remote_sync_status: RemoteSyncStatus,
    media_sync_abort: Option<AbortHandle>,
    http_sync_server: Option<LocalServer>,
}

#[derive(Default, Debug)]
@@ -192,6 +194,7 @@ impl std::convert::From<SyncErrorKind> for i32 {
            SyncErrorKind::DatabaseCheckRequired => V::DatabaseCheckRequired,
            SyncErrorKind::Other => V::Other,
            SyncErrorKind::ClockIncorrect => V::ClockIncorrect,
            SyncErrorKind::SyncNotStarted => V::SyncNotStarted,
        }) as i32
    }
}
@@ -292,11 +295,11 @@ impl From<pb::FilterToSearchIn> for Node<'_> {
            NamedFilter::AddedToday => Node::Search(SearchNode::AddedInDays(1)),
            NamedFilter::StudiedToday => Node::Search(SearchNode::Rated {
                days: 1,
                ease: None,
                ease: EaseKind::AnyAnswerButton,
            }),
            NamedFilter::AgainToday => Node::Search(SearchNode::Rated {
                days: 1,
                ease: Some(1),
                ease: EaseKind::AnswerButton(1),
            }),
            NamedFilter::New => Node::Search(SearchNode::State(StateKind::New)),
            NamedFilter::Learn => Node::Search(SearchNode::State(StateKind::Learning)),
@@ -324,6 +327,10 @@ impl From<pb::FilterToSearchIn> for Node<'_> {
            Filter::Template(u) => {
                Node::Search(SearchNode::CardTemplate(TemplateKind::Ordinal(u as u16)))
            }
            Filter::Dupe(dupe) => Node::Search(SearchNode::Duplicates {
                note_type_id: dupe.mid.unwrap_or(pb::NoteTypeId { ntid: 0 }).into(),
                text: dupe.text.into(),
            }),
        }
    }
}
@@ -533,20 +540,6 @@ impl BackendService for Backend {
        })
    }

    fn local_minutes_west(&self, input: pb::Int64) -> BackendResult<pb::Int32> {
        Ok(pb::Int32 {
            val: local_minutes_west_for_stamp(input.val),
        })
    }

    fn set_local_minutes_west(&self, input: pb::Int32) -> BackendResult<Empty> {
        self.with_col(|col| {
            col.transact(None, |col| {
                col.set_local_mins_west(input.val).map(Into::into)
            })
        })
    }

    /// Fetch data from DB and return rendered string.
    fn studied_today(&self, _input: pb::Empty) -> BackendResult<pb::String> {
        self.with_col(|col| col.studied_today().map(Into::into))
@@ -1303,6 +1296,11 @@ impl BackendService for Backend {
        self.with_col(|col| col.before_upload().map(Into::into))
    }

    fn sync_server_method(&self, input: pb::SyncServerMethodIn) -> BackendResult<pb::Json> {
        let req = SyncRequest::from_method_and_data(input.method(), input.data)?;
        self.sync_server_method_inner(req).map(Into::into)
    }

    // i18n/messages
    //-------------------------------------------------------------------

@@ -1694,11 +1692,11 @@ impl Backend {
        };

        let result = if upload {
            let sync_fut = col_inner.full_upload(input.into(), progress_fn);
            let sync_fut = col_inner.full_upload(input.into(), Box::new(progress_fn));
            let abortable_sync = Abortable::new(sync_fut, abort_reg);
            rt.block_on(abortable_sync)
        } else {
            let sync_fut = col_inner.full_download(input.into(), progress_fn);
            let sync_fut = col_inner.full_download(input.into(), Box::new(progress_fn));
            let abortable_sync = Abortable::new(sync_fut, abort_reg);
            rt.block_on(abortable_sync)
        };
@@ -38,11 +38,18 @@ pub fn open_collection<P: Into<PathBuf>>(
    Ok(col)
}

// We need to make a Builder for Collection in the future.

#[cfg(test)]
pub fn open_test_collection() -> Collection {
    open_test_collection_with_server(false)
}

#[cfg(test)]
pub fn open_test_collection_with_server(server: bool) -> Collection {
    use crate::log;
    let i18n = I18n::new(&[""], "", log::terminal());
    open_collection(":memory:", "", "", false, i18n, log::terminal()).unwrap()
    open_collection(":memory:", "", "", server, i18n, log::terminal()).unwrap()
}

#[derive(Debug, Default)]
@@ -141,11 +141,11 @@ impl Collection {
            .unwrap_or(DeckID(1))
    }

    pub(crate) fn get_creation_mins_west(&self) -> Option<i32> {
    pub(crate) fn get_creation_utc_offset(&self) -> Option<i32> {
        self.get_config_optional(ConfigKey::CreationOffset)
    }

    pub(crate) fn set_creation_mins_west(&self, mins: Option<i32>) -> Result<()> {
    pub(crate) fn set_creation_utc_offset(&self, mins: Option<i32>) -> Result<()> {
        if let Some(mins) = mins {
            self.set_config(ConfigKey::CreationOffset, &mins)
        } else {
@@ -153,11 +153,11 @@ impl Collection {
        }
    }

    pub(crate) fn get_local_mins_west(&self) -> Option<i32> {
    pub(crate) fn get_configured_utc_offset(&self) -> Option<i32> {
        self.get_config_optional(ConfigKey::LocalOffset)
    }

    pub(crate) fn set_local_mins_west(&self, mins: i32) -> Result<()> {
    pub(crate) fn set_configured_utc_offset(&self, mins: i32) -> Result<()> {
        self.set_config(ConfigKey::LocalOffset, &mins)
    }

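These config helpers store offsets as minutes west of UTC, so zones east of UTC are negative (UTC+10 is -600). A standalone chrono 0.4 sketch of that convention (values illustrative):

    use chrono::FixedOffset;

    fn main() {
        let mins_west = -600; // UTC+10, e.g. AEST
        let offset = FixedOffset::west(mins_west * 60);
        assert_eq!(offset, FixedOffset::east(600 * 60));
        // converting back: utc_minus_local() is in seconds west of UTC
        assert_eq!(offset.utc_minus_local() / 60, mins_west);
    }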
@@ -6,6 +6,7 @@ pub use failure::{Error, Fail};
use nom::error::{ErrorKind as NomErrorKind, ParseError as NomParseError};
use reqwest::StatusCode;
use std::{io, str::Utf8Error};
use tempfile::PathPersistError;

pub type Result<T> = std::result::Result<T, AnkiError>;

@@ -95,6 +96,8 @@ impl AnkiError {
                SyncErrorKind::ResyncRequired => i18n.tr(TR::SyncResyncRequired),
                SyncErrorKind::ClockIncorrect => i18n.tr(TR::SyncClockOff),
                SyncErrorKind::DatabaseCheckRequired => i18n.tr(TR::SyncSanityCheckFailed),
                // server message
                SyncErrorKind::SyncNotStarted => "sync not started".into(),
            }
            .into(),
            AnkiError::NetworkError { kind, info } => {
@@ -144,6 +147,7 @@ impl AnkiError {
                SearchErrorKind::InvalidRatedEase(ctx) => i18n
                    .trn(TR::SearchInvalidRatedEase, tr_strs!["val"=>(ctx)])
                    .into(),
                SearchErrorKind::InvalidResched => i18n.tr(TR::SearchInvalidResched),
                SearchErrorKind::InvalidDupeMid => i18n.tr(TR::SearchInvalidDupeMid),
                SearchErrorKind::InvalidDupeText => i18n.tr(TR::SearchInvalidDupeText),
                SearchErrorKind::InvalidPropProperty => i18n.tr(TR::SearchInvalidPropProperty),
@@ -278,6 +282,7 @@ pub enum SyncErrorKind {
    Other,
    ResyncRequired,
    DatabaseCheckRequired,
    SyncNotStarted,
}

fn error_for_status_code(info: String, code: StatusCode) -> AnkiError {
@@ -377,6 +382,14 @@ pub enum DBErrorKind {
    Other,
}

impl From<PathPersistError> for AnkiError {
    fn from(e: PathPersistError) -> Self {
        AnkiError::IOError {
            info: e.to_string(),
        }
    }
}

#[derive(Debug, PartialEq)]
pub enum ParseError<'a> {
    Anki(&'a str, SearchErrorKind),
@@ -403,6 +416,7 @@ pub enum SearchErrorKind {
    InvalidRatedEase(String),
    InvalidDupeMid,
    InvalidDupeText,
    InvalidResched,
    InvalidPropProperty,
    InvalidPropOperator(String),
    InvalidPropFloat(String),
@@ -9,7 +9,6 @@ use crate::{
    collection::Collection,
    err::Result,
    sched::cutoff::local_minutes_west_for_stamp,
    timestamp::TimestampSecs,
};

impl Collection {
@@ -43,7 +42,7 @@ impl Collection {
            show_remaining_due_counts: self.get_show_due_counts(),
            show_intervals_on_buttons: self.get_show_intervals_above_buttons(),
            time_limit_secs: self.get_answer_time_limit_secs(),
            new_timezone: self.get_creation_mins_west().is_some(),
            new_timezone: self.get_creation_utc_offset().is_some(),
            day_learn_first: self.get_day_learn_first(),
        })
    }
@@ -73,15 +72,11 @@ impl Collection {
        }

        if s.new_timezone {
            if self.get_creation_mins_west().is_none() {
                self.set_creation_mins_west(Some(local_minutes_west_for_stamp(created.0)))?;
            if self.get_creation_utc_offset().is_none() {
                self.set_creation_utc_offset(Some(local_minutes_west_for_stamp(created.0)))?;
            }
        } else {
            self.set_creation_mins_west(None)?;
        }

        if s.scheduler_version != 1 {
            self.set_local_mins_west(local_minutes_west_for_stamp(TimestampSecs::now().0))?;
            self.set_creation_utc_offset(None)?;
        }

        // fixme: currently scheduler change unhandled
@@ -1,7 +1,7 @@
// Copyright: Ankitects Pty Ltd and contributors
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html

use crate::timestamp::TimestampSecs;
use crate::prelude::*;
use chrono::{Date, Duration, FixedOffset, Local, TimeZone, Timelike};

#[derive(Debug, PartialEq, Clone, Copy)]
@@ -13,24 +13,21 @@ pub struct SchedTimingToday {
}

/// Timing information for the current day.
/// - created_secs is a UNIX timestamp of the collection creation time
/// - created_mins_west is the offset west of UTC at the time of creation
/// (eg UTC+10 hours is -600)
/// - now_secs is a timestamp of the current time
/// - now_mins_west is the current offset west of UTC
/// - creation_secs is a UNIX timestamp of the collection creation time
/// - creation_utc_offset is the UTC offset at collection creation time
/// - current_secs is a timestamp of the current time
/// - current_utc_offset is the current UTC offset
/// - rollover_hour is the hour of the day the rollover happens (eg 4 for 4am)
pub fn sched_timing_today_v2_new(
    created_secs: i64,
    created_mins_west: i32,
    now_secs: i64,
    now_mins_west: i32,
    creation_secs: TimestampSecs,
    creation_utc_offset: FixedOffset,
    current_secs: TimestampSecs,
    current_utc_offset: FixedOffset,
    rollover_hour: u8,
) -> SchedTimingToday {
    // get date(times) based on timezone offsets
    let created_date = fixed_offset_from_minutes(created_mins_west)
        .timestamp(created_secs, 0)
        .date();
    let now_datetime = fixed_offset_from_minutes(now_mins_west).timestamp(now_secs, 0);
    let created_date = creation_secs.datetime(creation_utc_offset).date();
    let now_datetime = current_secs.datetime(current_utc_offset);
    let today = now_datetime.date();

    // rollover
@@ -118,9 +115,9 @@ fn v1_creation_date_adjusted_to_hour_inner(crt: i64, hour: u8, offset: FixedOffs
        .timestamp()
}

fn sched_timing_today_v1(crt: i64, now: i64) -> SchedTimingToday {
    let days_elapsed = (now - crt) / 86_400;
    let next_day_at = crt + (days_elapsed + 1) * 86_400;
fn sched_timing_today_v1(crt: TimestampSecs, now: TimestampSecs) -> SchedTimingToday {
    let days_elapsed = (now.0 - crt.0) / 86_400;
    let next_day_at = crt.0 + (days_elapsed + 1) * 86_400;
    SchedTimingToday {
        days_elapsed: days_elapsed as u32,
        next_day_at,
@@ -128,26 +125,24 @@ fn sched_timing_today_v1(crt: i64, now: i64) -> SchedTimingToday {
}

fn sched_timing_today_v2_legacy(
    crt: i64,
    crt: TimestampSecs,
    rollover: u8,
    now: i64,
    mins_west: i32,
    now: TimestampSecs,
    current_utc_offset: FixedOffset,
) -> SchedTimingToday {
    let offset = fixed_offset_from_minutes(mins_west);

    let crt_at_rollover = offset
        .timestamp(crt, 0)
    let crt_at_rollover = crt
        .datetime(current_utc_offset)
        .date()
        .and_hms(rollover as u32, 0, 0)
        .timestamp();
    let days_elapsed = (now - crt_at_rollover) / 86_400;
    let days_elapsed = (now.0 - crt_at_rollover) / 86_400;

    let mut next_day_at = offset
        .timestamp(now, 0)
    let mut next_day_at = now
        .datetime(current_utc_offset)
        .date()
        .and_hms(rollover as u32, 0, 0)
        .timestamp();
    if next_day_at < now {
    if next_day_at < now.0 {
        next_day_at += 86_400;
    }

@@ -159,27 +154,33 @@ fn sched_timing_today_v2_legacy(

// ----------------------------------

/// Based on provided input, get timing info from the relevant function.
/// Decide which scheduler timing to use based on the provided input,
/// and return the relevant timing info.
pub(crate) fn sched_timing_today(
    created_secs: TimestampSecs,
    now_secs: TimestampSecs,
    created_mins_west: Option<i32>,
    now_mins_west: Option<i32>,
    creation_secs: TimestampSecs,
    current_secs: TimestampSecs,
    creation_utc_offset: Option<FixedOffset>,
    current_utc_offset: FixedOffset,
    rollover_hour: Option<u8>,
) -> SchedTimingToday {
    let now_west = now_mins_west.unwrap_or_else(|| local_minutes_west_for_stamp(now_secs.0));
    match (rollover_hour, created_mins_west) {
    match (rollover_hour, creation_utc_offset) {
        (None, _) => {
            // if rollover unset, v1 scheduler
            sched_timing_today_v1(created_secs.0, now_secs.0)
            sched_timing_today_v1(creation_secs, current_secs)
        }
        (Some(roll), None) => {
        (Some(rollover), None) => {
            // if creationOffset unset, v2 scheduler with legacy cutoff handling
            sched_timing_today_v2_legacy(created_secs.0, roll, now_secs.0, now_west)
            sched_timing_today_v2_legacy(creation_secs, rollover, current_secs, current_utc_offset)
        }
        (Some(roll), Some(crt_west)) => {
        (Some(rollover), Some(creation_utc_offset)) => {
            // v2 scheduler, new cutoff handling
            sched_timing_today_v2_new(created_secs.0, crt_west, now_secs.0, now_west, roll)
            sched_timing_today_v2_new(
                creation_secs,
                creation_utc_offset,
                current_secs,
                current_utc_offset,
                rollover,
            )
        }
    }
}
@@ -192,6 +193,10 @@ mod test {
    // static timezone for tests
    const AEST_MINS_WEST: i32 = -600;

    fn aest_offset() -> FixedOffset {
        FixedOffset::west(AEST_MINS_WEST * 60)
    }

    #[test]
    fn fixed_offset() {
        let offset = fixed_offset_from_minutes(AEST_MINS_WEST);
@@ -200,6 +205,10 @@ mod test {

    // helper
    fn elap(start: i64, end: i64, start_west: i32, end_west: i32, rollhour: u8) -> u32 {
        let start = TimestampSecs(start);
        let end = TimestampSecs(end);
        let start_west = FixedOffset::west(start_west * 60);
        let end_west = FixedOffset::west(end_west * 60);
        let today = sched_timing_today_v2_new(start, start_west, end, end_west, rollhour);
        today.days_elapsed
    }
@@ -312,10 +321,10 @@ mod test {
        let now = Local.ymd(2019, 1, 3).and_hms(2, 0, 0);
        let next_day_at = Local.ymd(2019, 1, 3).and_hms(rollhour, 0, 0);
        let today = sched_timing_today_v2_new(
            crt.timestamp(),
            crt.offset().utc_minus_local() / 60,
            now.timestamp(),
            now.offset().utc_minus_local() / 60,
            TimestampSecs(crt.timestamp()),
            *crt.offset(),
            TimestampSecs(now.timestamp()),
            *now.offset(),
            rollhour as u8,
        );
        assert_eq!(today.next_day_at, next_day_at.timestamp());
@@ -324,10 +333,10 @@ mod test {
        let now = Local.ymd(2019, 1, 3).and_hms(rollhour, 0, 0);
        let next_day_at = Local.ymd(2019, 1, 4).and_hms(rollhour, 0, 0);
        let today = sched_timing_today_v2_new(
            crt.timestamp(),
            crt.offset().utc_minus_local() / 60,
            now.timestamp(),
            now.offset().utc_minus_local() / 60,
            TimestampSecs(crt.timestamp()),
            *crt.offset(),
            TimestampSecs(now.timestamp()),
            *now.offset(),
            rollhour as u8,
        );
        assert_eq!(today.next_day_at, next_day_at.timestamp());
@@ -336,10 +345,10 @@ mod test {
        let now = Local.ymd(2019, 1, 3).and_hms(rollhour + 3, 0, 0);
        let next_day_at = Local.ymd(2019, 1, 4).and_hms(rollhour, 0, 0);
        let today = sched_timing_today_v2_new(
            crt.timestamp(),
            crt.offset().utc_minus_local() / 60,
            now.timestamp(),
            now.offset().utc_minus_local() / 60,
            TimestampSecs(crt.timestamp()),
            *crt.offset(),
            TimestampSecs(now.timestamp()),
            *now.offset(),
            rollhour as u8,
        );
        assert_eq!(today.next_day_at, next_day_at.timestamp());
@@ -347,10 +356,10 @@ mod test {

    #[test]
    fn legacy_timing() {
        let now = 1584491078;
        let now = TimestampSecs(1584491078);

        assert_eq!(
            sched_timing_today_v1(1575226800, now),
            sched_timing_today_v1(TimestampSecs(1575226800), now),
            SchedTimingToday {
                days_elapsed: 107,
                next_day_at: 1584558000
@@ -358,7 +367,7 @@ mod test {
        );

        assert_eq!(
            sched_timing_today_v2_legacy(1533564000, 0, now, AEST_MINS_WEST),
            sched_timing_today_v2_legacy(TimestampSecs(1533564000), 0, now, aest_offset()),
            SchedTimingToday {
                days_elapsed: 589,
                next_day_at: 1584540000
@@ -366,7 +375,7 @@ mod test {
        );

        assert_eq!(
            sched_timing_today_v2_legacy(1524038400, 4, now, AEST_MINS_WEST),
            sched_timing_today_v2_legacy(TimestampSecs(1524038400), 4, now, aest_offset()),
            SchedTimingToday {
                days_elapsed: 700,
                next_day_at: 1584554400
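The rollover arithmetic above is easy to check by hand. A standalone chrono 0.4 sketch (not calling into rslib) that reproduces the 1584554400 cutoff from the rollover-4 legacy test: the day ticks over at 4am local time, and if that moment is already past, the next cutoff is tomorrow.

    use chrono::{FixedOffset, TimeZone};

    fn main() {
        let aest = FixedOffset::east(10 * 3600);
        let now = aest.timestamp(1_584_491_078, 0); // 2020-03-18 09:44:38 +10:00
        let rollover = 4;
        let mut next_day_at = now.date().and_hms(rollover, 0, 0).timestamp();
        if next_day_at < now.timestamp() {
            // already past 4am today, so the next cutoff is tomorrow
            next_day_at += 86_400;
        }
        assert_eq!(next_day_at, 1_584_554_400); // matches the test above
    }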
@@ -1,9 +1,7 @@
// Copyright: Ankitects Pty Ltd and contributors
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html

use crate::{
    collection::Collection, config::SchedulerVersion, err::Result, timestamp::TimestampSecs,
};
use crate::{collection::Collection, config::SchedulerVersion, err::Result, prelude::*};

pub mod bury_and_suspend;
pub(crate) mod congrats;
@@ -15,8 +13,8 @@ pub mod timespan;

use chrono::FixedOffset;
use cutoff::{
    fixed_offset_from_minutes, local_minutes_west_for_stamp, sched_timing_today,
    v1_creation_date_adjusted_to_hour, v1_rollover_from_creation_stamp, SchedTimingToday,
    sched_timing_today, v1_creation_date_adjusted_to_hour, v1_rollover_from_creation_stamp,
    SchedTimingToday,
};

impl Collection {
@@ -29,37 +27,62 @@ impl Collection {
    }

    pub(crate) fn timing_for_timestamp(&self, now: TimestampSecs) -> Result<SchedTimingToday> {
        let local_offset = if self.server {
            self.get_local_mins_west()
        } else {
            None
        };
        let current_utc_offset = self.local_utc_offset_for_user()?;

        let rollover_hour = match self.sched_ver() {
            SchedulerVersion::V1 => None,
            SchedulerVersion::V2 => self.get_v2_rollover().or(Some(4)),
            SchedulerVersion::V2 => {
                let configured_rollover = self.get_v2_rollover();
                match configured_rollover {
                    None => {
                        // an older Anki version failed to set this; correct
                        // the issue
                        self.set_v2_rollover(4)?;
                        Some(4)
                    }
                    val => val,
                }
            }
        };

        Ok(sched_timing_today(
            self.storage.creation_stamp()?,
            now,
            self.get_creation_mins_west(),
            local_offset,
            self.creation_utc_offset(),
            current_utc_offset,
            rollover_hour,
        ))
    }

    /// Get the local timezone.
    /// We could use this to simplify timing_for_timestamp() in the future
    pub(crate) fn local_offset(&self) -> FixedOffset {
        let local_mins_west = if self.server {
            self.get_local_mins_west()
    /// In the client case, return the current local timezone offset,
    /// ensuring the config reflects the current value.
    /// In the server case, return the value set in the config, and
    /// fall back on UTC if it's missing/invalid.
    pub(crate) fn local_utc_offset_for_user(&self) -> Result<FixedOffset> {
        let config_tz = self
            .get_configured_utc_offset()
            .and_then(|v| FixedOffset::west_opt(v * 60))
            .unwrap_or_else(|| FixedOffset::west(0));

        let local_tz = TimestampSecs::now().local_utc_offset();

        Ok(if self.server {
            config_tz
        } else {
            None
        };
        let local_mins_west =
            local_mins_west.unwrap_or_else(|| local_minutes_west_for_stamp(TimestampSecs::now().0));
        fixed_offset_from_minutes(local_mins_west)
            // if the timezone has changed, update the config
            if config_tz != local_tz {
                self.set_configured_utc_offset(local_tz.utc_minus_local() / 60)?;
            }
            local_tz
        })
    }

    /// Return the timezone offset at collection creation time. This should
    /// only be set when the V2 scheduler is active and the new timezone
    /// code is enabled.
    fn creation_utc_offset(&self) -> Option<FixedOffset> {
        self.get_creation_utc_offset()
            .and_then(|v| FixedOffset::west_opt(v * 60))
    }

    pub fn rollover_for_current_scheduler(&self) -> Result<u8> {
@@ -5,7 +5,7 @@ mod sqlwriter;
mod writer;

pub use cards::SortMode;
pub use parser::{Node, PropertyKind, SearchNode, StateKind, TemplateKind};
pub use parser::{EaseKind, Node, PropertyKind, SearchNode, StateKind, TemplateKind};
pub use writer::{
    concatenate_searches, negate_search, normalize_search, replace_search_term, write_nodes,
    BoolSeparator,
@@ -58,7 +58,7 @@ pub enum SearchNode<'a> {
    NoteType(Cow<'a, str>),
    Rated {
        days: u32,
        ease: Option<u8>,
        ease: EaseKind,
    },
    Tag(Cow<'a, str>),
    Duplicates {
@@ -107,6 +107,13 @@ pub enum TemplateKind<'a> {
    Name(Cow<'a, str>),
}

#[derive(Debug, PartialEq, Clone)]
pub enum EaseKind {
    AnswerButton(u8),
    AnyAnswerButton,
    ManualReschedule,
}

/// Parse the input string into a list of nodes.
pub(super) fn parse(input: &str) -> Result<Vec<Node>> {
    let input = input.trim();
@@ -302,6 +309,7 @@ fn search_node_for_text_with_argument<'a>(
        "tag" => SearchNode::Tag(unescape(val)?),
        "card" => parse_template(val)?,
        "flag" => parse_flag(val)?,
        "resched" => parse_resched(val)?,
        "prop" => parse_prop(val)?,
        "added" => parse_added(val)?,
        "edited" => parse_edited(val)?,
@@ -314,7 +322,7 @@ fn search_node_for_text_with_argument<'a>(
        "re" => SearchNode::Regex(unescape_quotes(val)),
        "nc" => SearchNode::NoCombining(unescape(val)?),
        "w" => SearchNode::WordBoundary(unescape(val)?),
        "dupe" => parse_dupes(val)?,
        "dupe" => parse_dupe(val)?,
        // anything else is a field search
        _ => parse_single_field(key, val)?,
    })
@@ -340,8 +348,20 @@ fn parse_flag(s: &str) -> ParseResult<SearchNode> {
    }
}

/// eg resched:3
fn parse_resched(s: &str) -> ParseResult<SearchNode> {
    if let Ok(d) = s.parse::<u32>() {
        Ok(SearchNode::Rated {
            days: d.max(1).min(365),
            ease: EaseKind::ManualReschedule,
        })
    } else {
        Err(parse_failure(s, FailKind::InvalidResched))
    }
}

/// eg prop:ivl>3, prop:ease!=2.5
fn parse_prop(s: &str) -> ParseResult<SearchNode<'static>> {
fn parse_prop(s: &str) -> ParseResult<SearchNode> {
    let (tail, prop) = alt::<_, _, ParseError, _>((
        tag("ivl"),
        tag("due"),
@@ -420,15 +440,15 @@ fn parse_edited(s: &str) -> ParseResult<SearchNode> {
}

/// eg rated:3 or rated:10:2
/// second arg must be between 0-4
/// second arg must be between 1-4
fn parse_rated(s: &str) -> ParseResult<SearchNode> {
    let mut it = s.splitn(2, ':');
    if let Ok(d) = it.next().unwrap().parse::<u32>() {
        let days = d.max(1).min(365);
        let ease = if let Some(tail) = it.next() {
            if let Ok(u) = tail.parse::<u8>() {
                if u < 5 {
                    Some(u)
                if u > 0 && u < 5 {
                    EaseKind::AnswerButton(u)
                } else {
                    return Err(parse_failure(
                        s,
@@ -442,7 +462,7 @@ fn parse_rated(s: &str) -> ParseResult<SearchNode> {
                    ));
                }
        } else {
            None
            EaseKind::AnyAnswerButton
        };
        Ok(SearchNode::Rated { days, ease })
    } else {
@@ -495,14 +515,14 @@ fn check_id_list(s: &str) -> ParseResult<&str> {
    }
}

/// eg dupes:1231,hello
fn parse_dupes(s: &str) -> ParseResult<SearchNode> {
/// eg dupe:1231,hello
fn parse_dupe(s: &str) -> ParseResult<SearchNode> {
    let mut it = s.splitn(2, ',');
    if let Ok(mid) = it.next().unwrap().parse::<NoteTypeID>() {
        if let Some(text) = it.next() {
            Ok(SearchNode::Duplicates {
                note_type_id: mid,
                text: unescape_quotes(text),
                text: unescape_quotes_and_backslashes(text),
            })
        } else {
            Err(parse_failure(s, FailKind::InvalidDupeText))
@@ -537,6 +557,15 @@ fn unescape_quotes(s: &str) -> Cow<str> {
    }
}

/// For non-globs like dupe text without any assumption about the content
fn unescape_quotes_and_backslashes(s: &str) -> Cow<str> {
    if s.contains('"') || s.contains('\\') {
        s.replace(r#"\""#, "\"").replace(r"\\", r"\").into()
    } else {
        s.into()
    }
}

/// Unescape chars with special meaning to the parser.
fn unescape(txt: &str) -> ParseResult<Cow<str>> {
    if let Some(seq) = invalid_escape_sequence(txt) {
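A condensed model of how the rated:/resched: argument shapes above map onto EaseKind (standalone sketch; the real parser also clamps days to 1-365 and reports typed parse failures):

    #[derive(Debug, PartialEq)]
    enum EaseKind {
        AnswerButton(u8),
        AnyAnswerButton,
        ManualReschedule,
    }

    fn ease_for(key: &str, tail: Option<&str>) -> Option<EaseKind> {
        match (key, tail) {
            ("resched", None) => Some(EaseKind::ManualReschedule),
            ("rated", None) => Some(EaseKind::AnyAnswerButton),
            ("rated", Some(t)) => match t.parse::<u8>() {
                // only answer buttons 1-4 are valid
                Ok(u) if u >= 1 && u <= 4 => Some(EaseKind::AnswerButton(u)),
                _ => None, // InvalidRatedEase in the real code
            },
            _ => None,
        }
    }

    fn main() {
        assert_eq!(ease_for("rated", Some("2")), Some(EaseKind::AnswerButton(2)));
        assert_eq!(ease_for("rated", None), Some(EaseKind::AnyAnswerButton));
        assert_eq!(ease_for("resched", None), Some(EaseKind::ManualReschedule));
        assert_eq!(ease_for("rated", Some("0")), None);
    }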
@@ -1,7 +1,7 @@
// Copyright: Ankitects Pty Ltd and contributors
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html

use super::parser::{Node, PropertyKind, SearchNode, StateKind, TemplateKind};
use super::parser::{EaseKind, Node, PropertyKind, SearchNode, StateKind, TemplateKind};
use crate::{
    card::{CardQueue, CardType},
    collection::Collection,
@@ -123,7 +123,7 @@ impl SqlWriter<'_> {
                self.write_single_field(&norm(field), &self.norm_note(text), *is_re)?
            }
            SearchNode::Duplicates { note_type_id, text } => {
                self.write_dupes(*note_type_id, &self.norm_note(text))?
                self.write_dupe(*note_type_id, &self.norm_note(text))?
            }
            SearchNode::Regex(re) => self.write_regex(&self.norm_note(re)),
            SearchNode::NoCombining(text) => self.write_no_combining(&self.norm_note(text)),
@@ -144,7 +144,7 @@ impl SqlWriter<'_> {
                write!(self.sql, "c.did = {}", did).unwrap();
            }
            SearchNode::NoteType(notetype) => self.write_note_type(&norm(notetype))?,
            SearchNode::Rated { days, ease } => self.write_rated(*days, *ease)?,
            SearchNode::Rated { days, ease } => self.write_rated(*days, ease)?,

            SearchNode::Tag(tag) => self.write_tag(&norm(tag))?,
            SearchNode::State(state) => self.write_state(state)?,
@@ -214,20 +214,22 @@ impl SqlWriter<'_> {
        Ok(())
    }

    fn write_rated(&mut self, days: u32, ease: Option<u8>) -> Result<()> {
    fn write_rated(&mut self, days: u32, ease: &EaseKind) -> Result<()> {
        let today_cutoff = self.col.timing_today()?.next_day_at;
        let target_cutoff_ms = (today_cutoff - 86_400 * i64::from(days)) * 1_000;
        write!(
            self.sql,
            "c.id in (select cid from revlog where id>{}",
            target_cutoff_ms
            target_cutoff_ms,
        )
        .unwrap();
        if let Some(ease) = ease {
            write!(self.sql, " and ease={})", ease).unwrap();
        } else {
            write!(self.sql, ")").unwrap();

        match ease {
            EaseKind::AnswerButton(u) => write!(self.sql, " and ease = {})", u),
            EaseKind::AnyAnswerButton => write!(self.sql, " and ease > 0)"),
            EaseKind::ManualReschedule => write!(self.sql, " and ease = 0)"),
        }
        .unwrap();

        Ok(())
    }
@@ -441,7 +443,7 @@ impl SqlWriter<'_> {
        Ok(())
    }

    fn write_dupes(&mut self, ntid: NoteTypeID, text: &str) -> Result<()> {
    fn write_dupe(&mut self, ntid: NoteTypeID, text: &str) -> Result<()> {
        let text_nohtml = strip_html_preserving_media_filenames(text);
        let csum = field_checksum(text_nohtml.as_ref());

@@ -717,19 +719,28 @@ mod test {
        assert_eq!(
            s(ctx, "rated:2").0,
            format!(
                "(c.id in (select cid from revlog where id>{}))",
                "(c.id in (select cid from revlog where id>{} and ease > 0))",
                (timing.next_day_at - (86_400 * 2)) * 1_000
            )
        );
        assert_eq!(
            s(ctx, "rated:400:1").0,
            format!(
                "(c.id in (select cid from revlog where id>{} and ease=1))",
                "(c.id in (select cid from revlog where id>{} and ease = 1))",
                (timing.next_day_at - (86_400 * 365)) * 1_000
            )
        );
        assert_eq!(s(ctx, "rated:0").0, s(ctx, "rated:1").0);

        // resched
        assert_eq!(
            s(ctx, "resched:400").0,
            format!(
                "(c.id in (select cid from revlog where id>{} and ease = 0))",
                (timing.next_day_at - (86_400 * 365)) * 1_000
            )
        );

        // props
        assert_eq!(s(ctx, "prop:lapses=3").0, "(lapses = 3)".to_string());
        assert_eq!(s(ctx, "prop:ease>=2.5").0, "(factor >= 2500)".to_string());
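The revlog `ease` column convention that write_rated relies on: 0 marks a manual reschedule, 1-4 are the answer buttons, so "any answer button" becomes `ease > 0`. A standalone sketch of the SQL fragment each EaseKind produces (mirroring the tests above; not the real SqlWriter):

    enum EaseKind {
        AnswerButton(u8),
        AnyAnswerButton,
        ManualReschedule,
    }

    fn ease_sql(ease: &EaseKind) -> String {
        match ease {
            EaseKind::AnswerButton(u) => format!(" and ease = {})", u),
            EaseKind::AnyAnswerButton => " and ease > 0)".to_string(),
            EaseKind::ManualReschedule => " and ease = 0)".to_string(),
        }
    }

    fn main() {
        assert_eq!(ease_sql(&EaseKind::AnswerButton(1)), " and ease = 1)");
        assert_eq!(ease_sql(&EaseKind::AnyAnswerButton), " and ease > 0)");
        assert_eq!(ease_sql(&EaseKind::ManualReschedule), " and ease = 0)");
    }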
@@ -5,7 +5,7 @@ use crate::{
    decks::DeckID as DeckIDType,
    err::Result,
    notetype::NoteTypeID as NoteTypeIDType,
    search::parser::{parse, Node, PropertyKind, SearchNode, StateKind, TemplateKind},
    search::parser::{parse, EaseKind, Node, PropertyKind, SearchNode, StateKind, TemplateKind},
};
use itertools::Itertools;
use std::mem;
@@ -119,7 +119,7 @@ fn write_search_node(node: &SearchNode) -> String {
        NoteType(s) => quote(&format!("note:{}", s)),
        Rated { days, ease } => write_rated(days, ease),
        Tag(s) => quote(&format!("tag:{}", s)),
        Duplicates { note_type_id, text } => quote(&format!("dupes:{},{}", note_type_id, text)),
        Duplicates { note_type_id, text } => write_dupe(note_type_id, text),
        State(k) => write_state(k),
        Flag(u) => format!("\"flag:{}\"", u),
        NoteIDs(s) => format!("\"nid:{}\"", s),
@@ -154,13 +154,21 @@ fn write_template(template: &TemplateKind) -> String {
    }
}

fn write_rated(days: &u32, ease: &Option<u8>) -> String {
fn write_rated(days: &u32, ease: &EaseKind) -> String {
    use EaseKind::*;
    match ease {
        Some(u) => format!("\"rated:{}:{}\"", days, u),
        None => format!("\"rated:{}\"", days),
        AnswerButton(n) => format!("\"rated:{}:{}\"", days, n),
        AnyAnswerButton => format!("\"rated:{}\"", days),
        ManualReschedule => format!("\"resched:{}\"", days),
    }
}

/// Escape double quotes and backslashes: \"
fn write_dupe(note_type_id: &NoteTypeIDType, text: &str) -> String {
    let esc = text.replace(r"\", r"\\").replace('"', r#"\""#);
    format!("\"dupe:{},{}\"", note_type_id, esc)
}

fn write_state(kind: &StateKind) -> String {
    use StateKind::*;
    format!(
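write_dupe escapes backslashes and quotes so the dupe text survives the quoted search string, and unescape_quotes_and_backslashes in the parser reverses it. A standalone round-trip sketch of that escaping pair (simplified helper names, not the crate's functions):

    fn escape(text: &str) -> String {
        // backslashes first, then quotes, as write_dupe does
        text.replace(r"\", r"\\").replace('"', r#"\""#)
    }

    fn unescape(s: &str) -> String {
        // inverse order: quotes first, then backslashes
        s.replace(r#"\""#, "\"").replace(r"\\", r"\")
    }

    fn main() {
        let original = r#"a\b "quoted""#;
        assert_eq!(unescape(&escape(original)), original);
    }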
@@ -58,7 +58,7 @@ struct RevlogText {
impl Collection {
    pub fn card_stats(&mut self, cid: CardID) -> Result<String> {
        let stats = self.gather_card_stats(cid)?;
        Ok(self.card_stats_to_string(stats))
        self.card_stats_to_string(stats)
    }

    fn gather_card_stats(&mut self, cid: CardID) -> Result<CardStats> {
@@ -126,8 +126,8 @@ impl Collection {
        })
    }

    fn card_stats_to_string(&self, cs: CardStats) -> String {
        let offset = self.local_offset();
    fn card_stats_to_string(&self, cs: CardStats) -> Result<String> {
        let offset = self.local_utc_offset_for_user()?;
        let i18n = &self.i18n;

        let mut stats = vec![(
@@ -216,13 +216,13 @@ impl Collection {
            taken_secs: i18n.tr(TR::CardStatsReviewLogTimeTaken).into(),
        };

        CardStatsTemplate {
        Ok(CardStatsTemplate {
            stats,
            revlog,
            revlog_titles,
        }
        .render()
        .unwrap()
        .unwrap())
    }
}

@@ -22,7 +22,7 @@ impl Collection {
            0
        });

        let offset = self.local_offset();
        let offset = self.local_utc_offset_for_user()?;
        let local_offset_secs = offset.local_minus_utc() as i64;

        let cards = self.storage.all_searched_cards()?;
@@ -229,7 +229,7 @@ impl super::SqliteStorage {
            .prepare_cached("select null from cards")?
            .query(NO_PARAMS)?
            .next()
            .map(|o| o.is_none())
            .map(|o| o.is_some())
            .map_err(Into::into)
    }

@@ -16,6 +16,7 @@ mod tag;
mod upgrades;

pub(crate) use sqlite::SqliteStorage;
pub(crate) use sync::open_and_check_sqlite_file;

use std::fmt::Write;

@@ -170,7 +170,7 @@ impl SqliteStorage {
            "update col set crt=?, scm=?, ver=?, conf=?",
            params![
                crt,
                crt * 1000,
                TimestampMillis::now(),
                SCHEMA_STARTING_VERSION,
                &schema11_config_as_string()
            ],
@@ -1,9 +1,11 @@
// Copyright: Ankitects Pty Ltd and contributors
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html

use std::path::Path;

use super::*;
use crate::prelude::*;
use rusqlite::{params, types::FromSql, ToSql, NO_PARAMS};
use rusqlite::{params, types::FromSql, Connection, ToSql, NO_PARAMS};

impl SqliteStorage {
    pub(crate) fn usn(&self, server: bool) -> Result<Usn> {
@@ -59,3 +61,28 @@ impl SqliteStorage {
        Ok(())
    }
}

/// Return error if file is unreadable, fails the sqlite
/// integrity check, or is not in the 'delete' journal mode.
/// On success, returns the opened DB.
pub(crate) fn open_and_check_sqlite_file(path: &Path) -> Result<Connection> {
    let db = Connection::open(path)?;
    match db.pragma_query_value(None, "integrity_check", |row| row.get::<_, String>(0)) {
        Ok(s) => {
            if s != "ok" {
                return Err(AnkiError::invalid_input(format!("corrupt: {}", s)));
            }
        }
        Err(e) => return Err(e.into()),
    };
    match db.pragma_query_value(None, "journal_mode", |row| row.get::<_, String>(0)) {
        Ok(s) => {
            if s == "delete" {
                Ok(db)
            } else {
                Err(AnkiError::invalid_input(format!("corrupt: {}", s)))
            }
        }
        Err(e) => Err(e.into()),
    }
}
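A hypothetical crate-internal caller of open_and_check_sqlite_file (the helper name and path are made up for illustration; the function itself is crate-private):

    use std::path::Path;

    use crate::err::Result;
    use crate::storage::open_and_check_sqlite_file;

    // hypothetical helper: validate a downloaded collection before swapping it in
    fn check_downloaded_collection(path: &Path) -> Result<()> {
        // rejects unreadable files, integrity_check failures, and non-'delete'
        // journal modes (e.g. WAL)
        let db = open_and_check_sqlite_file(path)?;
        drop(db); // close the connection before persisting the file into place
        Ok(())
    }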
121 rslib/src/sync/http.rs Normal file
@@ -0,0 +1,121 @@
// Copyright: Ankitects Pty Ltd and contributors
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html

use std::{fs, path::PathBuf};

use super::{Chunk, Graves, SanityCheckCounts, UnchunkedChanges};
use crate::backend_proto::sync_server_method_in::Method;
use crate::prelude::*;
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub enum SyncRequest {
    HostKey(HostKeyIn),
    Meta(MetaIn),
    Start(StartIn),
    ApplyGraves(ApplyGravesIn),
    ApplyChanges(ApplyChangesIn),
    Chunk,
    ApplyChunk(ApplyChunkIn),
    #[serde(rename = "sanityCheck2")]
    SanityCheck(SanityCheckIn),
    Finish,
    Abort,
    #[serde(rename = "upload")]
    FullUpload(PathBuf),
    #[serde(rename = "download")]
    FullDownload,
}

impl SyncRequest {
    /// Return method name and payload bytes.
    pub(crate) fn into_method_and_data(self) -> Result<(&'static str, Vec<u8>)> {
        use serde_json::to_vec;
        Ok(match self {
            SyncRequest::HostKey(v) => ("hostKey", to_vec(&v)?),
            SyncRequest::Meta(v) => ("meta", to_vec(&v)?),
            SyncRequest::Start(v) => ("start", to_vec(&v)?),
            SyncRequest::ApplyGraves(v) => ("applyGraves", to_vec(&v)?),
            SyncRequest::ApplyChanges(v) => ("applyChanges", to_vec(&v)?),
            SyncRequest::Chunk => ("chunk", b"{}".to_vec()),
            SyncRequest::ApplyChunk(v) => ("applyChunk", to_vec(&v)?),
            SyncRequest::SanityCheck(v) => ("sanityCheck2", to_vec(&v)?),
            SyncRequest::Finish => ("finish", b"{}".to_vec()),
            SyncRequest::Abort => ("abort", b"{}".to_vec()),
            SyncRequest::FullUpload(v) => {
                // fixme: stream in the data instead, in a different call
                ("upload", fs::read(&v)?)
            }
            SyncRequest::FullDownload => ("download", b"{}".to_vec()),
        })
    }

    pub(crate) fn from_method_and_data(method: Method, data: Vec<u8>) -> Result<Self> {
        use serde_json::from_slice;
        Ok(match method {
            Method::HostKey => SyncRequest::HostKey(from_slice(&data)?),
            Method::Meta => SyncRequest::Meta(from_slice(&data)?),
            Method::Start => SyncRequest::Start(from_slice(&data)?),
            Method::ApplyGraves => SyncRequest::ApplyGraves(from_slice(&data)?),
            Method::ApplyChanges => SyncRequest::ApplyChanges(from_slice(&data)?),
            Method::Chunk => SyncRequest::Chunk,
            Method::ApplyChunk => SyncRequest::ApplyChunk(from_slice(&data)?),
            Method::SanityCheck => SyncRequest::SanityCheck(from_slice(&data)?),
            Method::Finish => SyncRequest::Finish,
            Method::Abort => SyncRequest::Abort,
            Method::FullUpload => {
                let path = PathBuf::from(String::from_utf8(data).expect("path was not in utf8"));
                SyncRequest::FullUpload(path)
            }
            Method::FullDownload => SyncRequest::FullDownload,
        })
    }
}

#[derive(Serialize, Deserialize, Debug)]
pub struct HostKeyIn {
    #[serde(rename = "u")]
    pub username: String,
    #[serde(rename = "p")]
    pub password: String,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct HostKeyOut {
    pub key: String,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct MetaIn {
    #[serde(rename = "v")]
    pub sync_version: u8,
    #[serde(rename = "cv")]
    pub client_version: String,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct StartIn {
    #[serde(rename = "minUsn")]
    pub client_usn: Usn,
    #[serde(rename = "lnewer")]
    pub local_is_newer: bool,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct ApplyGravesIn {
    pub chunk: Graves,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct ApplyChangesIn {
    pub changes: UnchunkedChanges,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct ApplyChunkIn {
    pub chunk: Chunk,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct SanityCheckIn {
    pub client: SanityCheckCounts,
}
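The serde renames on these structs keep the wire format compatible with the existing sync protocol's short keys. A standalone sketch of what they produce (stand-in struct; serde and serde_json assumed as dependencies):

    use serde::Serialize;

    #[derive(Serialize)]
    struct HostKeyIn {
        #[serde(rename = "u")]
        username: String,
        #[serde(rename = "p")]
        password: String,
    }

    fn main() {
        let req = HostKeyIn {
            username: "user".into(),
            password: "pass".into(),
        };
        let json = serde_json::to_string(&req).unwrap();
        // fields are emitted under their renamed keys, in declaration order
        assert_eq!(json, r#"{"u":"user","p":"pass"}"#);
    }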
@@ -1,75 +1,41 @@
// Copyright: Ankitects Pty Ltd and contributors
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html

use super::*;
use super::{server::SyncServer, SYNC_VERSION_MAX};
use super::{
    Chunk, FullSyncProgress, Graves, SanityCheckCounts, SanityCheckOut, SyncMeta, UnchunkedChanges,
};
use crate::prelude::*;
use crate::{err::SyncErrorKind, notes::guid, version::sync_client_version};
use async_trait::async_trait;
use bytes::Bytes;
use flate2::write::GzEncoder;
use flate2::Compression;
use futures::Stream;
use futures::StreamExt;
use reqwest::Body;
use reqwest::{multipart, Client, Response};
use serde::de::DeserializeOwned;

use super::http::{
    ApplyChangesIn, ApplyChunkIn, ApplyGravesIn, HostKeyIn, HostKeyOut, MetaIn, SanityCheckIn,
    StartIn, SyncRequest,
};
use std::io::prelude::*;
use std::path::Path;
use std::time::Duration;
use tempfile::NamedTempFile;

// fixme: 100mb limit

static SYNC_VERSION: u8 = 10;
pub type FullSyncProgressFn = Box<dyn FnMut(FullSyncProgress, bool) + Send + Sync + 'static>;

pub struct HTTPSyncClient {
    hkey: Option<String>,
    skey: String,
    client: Client,
    endpoint: String,
}

#[derive(Serialize)]
struct HostKeyIn<'a> {
    #[serde(rename = "u")]
    username: &'a str,
    #[serde(rename = "p")]
    password: &'a str,
}
#[derive(Deserialize)]
struct HostKeyOut {
    key: String,
}

#[derive(Serialize)]
struct MetaIn<'a> {
    #[serde(rename = "v")]
    sync_version: u8,
    #[serde(rename = "cv")]
    client_version: &'a str,
}

#[derive(Serialize, Deserialize, Debug)]
struct StartIn {
    #[serde(rename = "minUsn")]
    local_usn: Usn,
    #[serde(rename = "offset")]
    minutes_west: Option<i32>,
    // only used to modify behaviour of changes()
    #[serde(rename = "lnewer")]
    local_is_newer: bool,
    // used by 2.0 clients
    #[serde(skip_serializing_if = "Option::is_none")]
    local_graves: Option<Graves>,
}

#[derive(Serialize, Deserialize, Debug)]
struct ApplyGravesIn {
    chunk: Graves,
}

#[derive(Serialize, Deserialize, Debug)]
struct ApplyChangesIn {
    changes: UnchunkedChanges,
}

#[derive(Serialize, Deserialize, Debug)]
struct ApplyChunkIn {
    chunk: Chunk,
}

#[derive(Serialize, Deserialize, Debug)]
struct SanityCheckIn {
    client: SanityCheckCounts,
    full: bool,
    full_sync_progress_fn: Option<FullSyncProgressFn>,
}

pub struct Timeouts {
@@ -94,8 +60,106 @@ impl Timeouts {
        }
    }
}
#[derive(Serialize)]
struct Empty {}

#[async_trait(?Send)]
impl SyncServer for HTTPSyncClient {
    async fn meta(&self) -> Result<SyncMeta> {
        let input = SyncRequest::Meta(MetaIn {
            sync_version: SYNC_VERSION_MAX,
            client_version: sync_client_version().to_string(),
        });
        self.json_request(input).await
    }

    async fn start(&mut self, client_usn: Usn, local_is_newer: bool) -> Result<Graves> {
        let input = SyncRequest::Start(StartIn {
            client_usn,
            local_is_newer,
        });
        self.json_request(input).await
    }

    async fn apply_graves(&mut self, chunk: Graves) -> Result<()> {
        let input = SyncRequest::ApplyGraves(ApplyGravesIn { chunk });
        self.json_request(input).await
    }

    async fn apply_changes(&mut self, changes: UnchunkedChanges) -> Result<UnchunkedChanges> {
        let input = SyncRequest::ApplyChanges(ApplyChangesIn { changes });
        self.json_request(input).await
    }

    async fn chunk(&mut self) -> Result<Chunk> {
        let input = SyncRequest::Chunk;
        self.json_request(input).await
    }

    async fn apply_chunk(&mut self, chunk: Chunk) -> Result<()> {
        let input = SyncRequest::ApplyChunk(ApplyChunkIn { chunk });
        self.json_request(input).await
    }

    async fn sanity_check(&mut self, client: SanityCheckCounts) -> Result<SanityCheckOut> {
        let input = SyncRequest::SanityCheck(SanityCheckIn { client });
        self.json_request(input).await
    }

    async fn finish(&mut self) -> Result<TimestampMillis> {
        let input = SyncRequest::Finish;
        self.json_request(input).await
    }

    async fn abort(&mut self) -> Result<()> {
        let input = SyncRequest::Abort;
        self.json_request(input).await
    }

    async fn full_upload(mut self: Box<Self>, col_path: &Path, _can_consume: bool) -> Result<()> {
        let file = tokio::fs::File::open(col_path).await?;
        let total_bytes = file.metadata().await?.len() as usize;
        let progress_fn = self
            .full_sync_progress_fn
            .take()
            .expect("progress func was not set");
        let wrap1 = ProgressWrapper {
            reader: file,
            progress_fn,
            progress: FullSyncProgress {
                transferred_bytes: 0,
                total_bytes,
            },
        };
        let wrap2 = async_compression::stream::GzipEncoder::new(wrap1);
        let body = Body::wrap_stream(wrap2);
        self.upload_inner(body).await?;

        Ok(())
    }

    /// Download collection into a temporary file, returning it.
    /// Caller should persist the file in the correct path after checking it.
    /// Progress func must be set first.
    async fn full_download(mut self: Box<Self>) -> Result<NamedTempFile> {
        let mut temp_file = NamedTempFile::new()?;
        let (size, mut stream) = self.download_inner().await?;
        let mut progress = FullSyncProgress {
            transferred_bytes: 0,
            total_bytes: size,
        };
        let mut progress_fn = self
            .full_sync_progress_fn
            .take()
            .expect("progress func was not set");
        while let Some(chunk) = stream.next().await {
            let chunk = chunk?;
            temp_file.write_all(&chunk)?;
            progress.transferred_bytes += chunk.len();
            progress_fn(progress, true);
        }
        progress_fn(progress, false);
        Ok(temp_file)
    }
}

impl HTTPSyncClient {
    pub fn new(hkey: Option<String>, host_number: u32) -> HTTPSyncClient {
@@ -113,34 +177,39 @@ impl HTTPSyncClient {
            skey,
            client,
            endpoint,
            full_sync_progress_fn: None,
        }
    }

    async fn json_request<T>(&self, method: &str, json: &T, timeout_long: bool) -> Result<Response>
    where
        T: serde::Serialize,
    {
        let req_json = serde_json::to_vec(json)?;

        let mut gz = GzEncoder::new(Vec::new(), Compression::fast());
        gz.write_all(&req_json)?;
        let part = multipart::Part::bytes(gz.finish()?);

        self.request(method, part, timeout_long).await
    pub fn set_full_sync_progress_fn(&mut self, func: Option<FullSyncProgressFn>) {
        self.full_sync_progress_fn = func;
    }

    async fn json_request_deserialized<T, T2>(&self, method: &str, json: &T) -> Result<T2>
    async fn json_request<T>(&self, req: SyncRequest) -> Result<T>
    where
        T: Serialize,
        T2: DeserializeOwned,
        T: DeserializeOwned,
    {
        self.json_request(method, json, false)
        let (method, req_json) = req.into_method_and_data()?;
        self.request_bytes(method, &req_json, false)
            .await?
            .json()
            .await
            .map_err(Into::into)
    }

    async fn request_bytes(
        &self,
        method: &str,
        req: &[u8],
        timeout_long: bool,
    ) -> Result<Response> {
        let mut gz = GzEncoder::new(Vec::new(), Compression::fast());
        gz.write_all(req)?;
        let part = multipart::Part::bytes(gz.finish()?);
        let resp = self.request(method, part, timeout_long).await?;
        resp.error_for_status().map_err(Into::into)
    }

    async fn request(
        &self,
        method: &str,
@@ -166,11 +235,13 @@ impl HTTPSyncClient {
        req.send().await?.error_for_status().map_err(Into::into)
    }

    pub(crate) async fn login(&mut self, username: &str, password: &str) -> Result<()> {
        let resp: HostKeyOut = self
            .json_request_deserialized("hostKey", &HostKeyIn { username, password })
            .await?;
        self.hkey = Some(resp.key);
    pub(crate) async fn login<S: Into<String>>(&mut self, username: S, password: S) -> Result<()> {
        let input = SyncRequest::HostKey(HostKeyIn {
            username: username.into(),
            password: password.into(),
        });
        let output: HostKeyOut = self.json_request(input).await?;
        self.hkey = Some(output.key);

        Ok(())
    }
@@ -179,108 +250,17 @@ impl HTTPSyncClient {
        self.hkey.as_ref().unwrap()
    }

    pub(crate) async fn meta(&self) -> Result<SyncMeta> {
        let meta_in = MetaIn {
            sync_version: SYNC_VERSION,
            client_version: sync_client_version(),
        };
        self.json_request_deserialized("meta", &meta_in).await
    }

    pub(crate) async fn start(
        &self,
        local_usn: Usn,
        minutes_west: Option<i32>,
        local_is_newer: bool,
    ) -> Result<Graves> {
        let input = StartIn {
            local_usn,
            minutes_west,
            local_is_newer,
            local_graves: None,
        };
        self.json_request_deserialized("start", &input).await
    }

    pub(crate) async fn apply_graves(&self, chunk: Graves) -> Result<()> {
        let input = ApplyGravesIn { chunk };
        let resp = self.json_request("applyGraves", &input, false).await?;
        resp.error_for_status()?;
        Ok(())
    }

    pub(crate) async fn apply_changes(
        &self,
        changes: UnchunkedChanges,
    ) -> Result<UnchunkedChanges> {
        let input = ApplyChangesIn { changes };
        self.json_request_deserialized("applyChanges", &input).await
    }

    pub(crate) async fn chunk(&self) -> Result<Chunk> {
        self.json_request_deserialized("chunk", &Empty {}).await
    }

    pub(crate) async fn apply_chunk(&self, chunk: Chunk) -> Result<()> {
        let input = ApplyChunkIn { chunk };
        let resp = self.json_request("applyChunk", &input, false).await?;
        resp.error_for_status()?;
        Ok(())
    }

    pub(crate) async fn sanity_check(&self, client: SanityCheckCounts) -> Result<SanityCheckOut> {
        let input = SanityCheckIn { client, full: true };
        self.json_request_deserialized("sanityCheck2", &input).await
    }

    pub(crate) async fn finish(&self) -> Result<TimestampMillis> {
        Ok(self.json_request_deserialized("finish", &Empty {}).await?)
    }

    pub(crate) async fn abort(&self) -> Result<()> {
        let resp = self.json_request("abort", &Empty {}, false).await?;
        resp.error_for_status()?;
        Ok(())
    }

    async fn download_inner(
        &self,
    ) -> Result<(
        usize,
        impl Stream<Item = std::result::Result<Bytes, reqwest::Error>>,
    )> {
        let resp: reqwest::Response = self.json_request("download", &Empty {}, true).await?;
        let resp: reqwest::Response = self.request_bytes("download", b"{}", true).await?;
        let len = resp.content_length().unwrap_or_default();
        Ok((len as usize, resp.bytes_stream()))
    }

    /// Download collection into a temporary file, returning it.
    /// Caller should persist the file in the correct path after checking it.
    pub(crate) async fn download<P>(
        &self,
        folder: &Path,
        mut progress_fn: P,
    ) -> Result<NamedTempFile>
    where
        P: FnMut(FullSyncProgress, bool),
    {
        let mut temp_file = NamedTempFile::new_in(folder)?;
        let (size, mut stream) = self.download_inner().await?;
        let mut progress = FullSyncProgress {
            transferred_bytes: 0,
            total_bytes: size,
        };
        while let Some(chunk) = stream.next().await {
            let chunk = chunk?;
            temp_file.write_all(&chunk)?;
            progress.transferred_bytes += chunk.len();
            progress_fn(progress, true);
        }
        progress_fn(progress, false);

        Ok(temp_file)
    }

    async fn upload_inner(&self, body: Body) -> Result<()> {
        let data_part = multipart::Part::stream(body);
        let resp = self.request("upload", data_part, true).await?;
@@ -295,27 +275,6 @@ impl HTTPSyncClient {
        Ok(())
    }
}

    pub(crate) async fn upload<P>(&mut self, col_path: &Path, progress_fn: P) -> Result<()>
    where
        P: FnMut(FullSyncProgress, bool) + Send + Sync + 'static,
    {
        let file = tokio::fs::File::open(col_path).await?;
        let total_bytes = file.metadata().await?.len() as usize;
        let wrap1 = ProgressWrapper {
            reader: file,
            progress_fn,
            progress: FullSyncProgress {
                transferred_bytes: 0,
                total_bytes,
            },
        };
        let wrap2 = async_compression::stream::GzipEncoder::new(wrap1);
        let body = Body::wrap_stream(wrap2);
        self.upload_inner(body).await?;

        Ok(())
    }
}

use futures::{
@@ -376,11 +335,11 @@ fn sync_endpoint(host_number: u32) -> String {
#[cfg(test)]
mod test {
    use super::*;
    use crate::err::SyncErrorKind;
    use crate::{err::SyncErrorKind, sync::SanityCheckDueCounts};
    use tokio::runtime::Runtime;

    async fn http_client_inner(username: String, password: String) -> Result<()> {
        let mut syncer = HTTPSyncClient::new(None, 0);
        let mut syncer = Box::new(HTTPSyncClient::new(None, 0));

        assert!(matches!(
            syncer.login("nosuchuser", "nosuchpass").await,
@@ -403,13 +362,13 @@ mod test {
            })
        ));

        let _graves = syncer.start(Usn(1), None, true).await?;
        let _graves = syncer.start(Usn(1), true).await?;

        // aborting should now work
        syncer.abort().await?;

        // start again, and continue
        let _graves = syncer.start(Usn(1), None, true).await?;
        let _graves = syncer.start(Usn(1), true).await?;

        syncer.apply_graves(Graves::default()).await?;

@@ -442,20 +401,16 @@ mod test {
        // failed sanity check will have cleaned up; can't finish
        // syncer.finish().await?;

        use tempfile::tempdir;
        syncer.set_full_sync_progress_fn(Some(Box::new(|progress, _throttle| {
            println!("progress: {:?}", progress);
        })));
        let out_path = syncer.full_download().await?;

        let dir = tempdir()?;
        let out_path = syncer
            .download(&dir.path(), |progress, _throttle| {
                println!("progress: {:?}", progress);
            })
            .await?;

        syncer
            .upload(&out_path.path(), |progress, _throttle| {
                println!("progress {:?}", progress);
            })
            .await?;
        let mut syncer = Box::new(HTTPSyncClient::new(None, 0));
        syncer.set_full_sync_progress_fn(Some(Box::new(|progress, _throttle| {
            println!("progress {:?}", progress);
        })));
        syncer.full_upload(&out_path.path(), false).await?;

        Ok(())
    }
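The FullSyncProgressFn pattern above stores the progress callback on the client as a boxed FnMut, so it can be set before a full sync and moved out with take() when the transfer starts. A standalone sketch of that design (hypothetical Client type, not the real HTTPSyncClient):

    #[derive(Debug, Clone, Copy, Default)]
    struct FullSyncProgress {
        transferred_bytes: usize,
        total_bytes: usize,
    }

    type ProgressFn = Box<dyn FnMut(FullSyncProgress, bool) + Send + Sync + 'static>;

    struct Client {
        progress_fn: Option<ProgressFn>,
    }

    impl Client {
        fn set_progress_fn(&mut self, f: Option<ProgressFn>) {
            self.progress_fn = f;
        }

        fn transfer(&mut self) {
            // take() moves the callback out, as full_upload/full_download do
            let mut f = self.progress_fn.take().expect("progress func was not set");
            f(FullSyncProgress { transferred_bytes: 10, total_bytes: 100 }, true);
        }
    }

    fn main() {
        let mut c = Client { progress_fn: None };
        c.set_progress_fn(Some(Box::new(|p, _throttled| println!("{:?}", p))));
        c.transfer();
    }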
@ -1,7 +1,9 @@
|
||||
// Copyright: Ankitects Pty Ltd and contributors
|
||||
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
|
||||
|
||||
pub mod http;
|
||||
mod http_client;
|
||||
mod server;
|
||||
|
||||
use crate::{
|
||||
backend_proto::{sync_status_out, SyncStatusOut},
|
||||
@ -9,27 +11,26 @@ use crate::{
|
||||
deckconf::DeckConfSchema11,
|
||||
decks::DeckSchema11,
|
||||
err::SyncErrorKind,
|
||||
notes::{guid, Note},
|
||||
notes::Note,
|
||||
notetype::{NoteType, NoteTypeSchema11},
|
||||
prelude::*,
|
||||
revlog::RevlogEntry,
|
||||
serde::{default_on_invalid, deserialize_int_from_number},
|
||||
storage::open_and_check_sqlite_file,
|
||||
tags::{join_tags, split_tags},
|
||||
version::sync_client_version,
|
||||
};
|
||||
use flate2::write::GzEncoder;
|
||||
use flate2::Compression;
|
||||
use futures::StreamExt;
|
||||
pub use http_client::FullSyncProgressFn;
|
||||
use http_client::HTTPSyncClient;
|
||||
pub use http_client::Timeouts;
|
||||
use itertools::Itertools;
|
||||
use reqwest::{multipart, Client, Response};
|
||||
use serde::{de::DeserializeOwned, Deserialize, Serialize};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use serde_tuple::Serialize_tuple;
|
||||
use std::io::prelude::*;
|
||||
use std::{collections::HashMap, path::Path, time::Duration};
|
||||
use tempfile::NamedTempFile;
|
||||
pub(crate) use server::{LocalServer, SyncServer};
|
||||
use std::collections::HashMap;
|
||||
|
||||
pub static SYNC_VERSION_MIN: u8 = 7;
|
||||
pub static SYNC_VERSION_MAX: u8 = 10;
|
||||
|
||||
#[derive(Default, Debug, Clone, Copy)]
|
||||
pub struct NormalSyncProgress {
|
||||
@ -53,23 +54,23 @@ impl Default for SyncStage {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
#[derive(Serialize, Deserialize, Debug, Default)]
|
||||
pub struct SyncMeta {
|
||||
#[serde(rename = "mod")]
|
||||
modified: TimestampMillis,
|
||||
pub modified: TimestampMillis,
|
||||
#[serde(rename = "scm")]
|
||||
schema: TimestampMillis,
|
||||
usn: Usn,
|
||||
pub schema: TimestampMillis,
|
||||
pub usn: Usn,
|
||||
#[serde(rename = "ts")]
|
||||
current_time: TimestampSecs,
|
||||
pub current_time: TimestampSecs,
|
||||
#[serde(rename = "msg")]
|
||||
server_message: String,
|
||||
pub server_message: String,
|
||||
#[serde(rename = "cont")]
|
||||
should_continue: bool,
|
||||
pub should_continue: bool,
|
||||
#[serde(rename = "hostNum")]
|
||||
host_number: u32,
|
||||
pub host_number: u32,
|
||||
#[serde(default)]
|
||||
empty: bool,
|
||||
pub empty: bool,
|
||||
}
|
||||
|
||||
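With the fields now public and Default derived, a SyncMeta can be constructed directly by the new local server and by tests, while the serde renames keep it on the legacy wire keys. A rough round-trip sketch (serde_json assumed; the commented output is approximate):

    let meta = SyncMeta {
        should_continue: true,
        ..Default::default()
    };
    let json = serde_json::to_string(&meta)?;
    // approximately: {"mod":0,"scm":0,"usn":0,"ts":0,"msg":"","cont":true,"hostNum":0,"empty":false}
    let roundtrip: SyncMeta = serde_json::from_str(&json)?;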
#[derive(Serialize, Deserialize, Debug, Default)]
@ -160,21 +161,21 @@ pub struct CardEntry {

#[derive(Serialize, Deserialize, Debug)]
pub struct SanityCheckOut {
    status: SanityCheckStatus,
    pub status: SanityCheckStatus,
    #[serde(rename = "c", default, deserialize_with = "default_on_invalid")]
    client: Option<SanityCheckCounts>,
    pub client: Option<SanityCheckCounts>,
    #[serde(rename = "s", default, deserialize_with = "default_on_invalid")]
    server: Option<SanityCheckCounts>,
    pub server: Option<SanityCheckCounts>,
}

#[derive(Serialize, Deserialize, Debug, PartialEq)]
#[serde(rename_all = "lowercase")]
enum SanityCheckStatus {
pub enum SanityCheckStatus {
    Ok,
    Bad,
}

#[derive(Serialize_tuple, Deserialize, Debug)]
#[derive(Serialize_tuple, Deserialize, Debug, PartialEq)]
pub struct SanityCheckCounts {
    pub counts: SanityCheckDueCounts,
    pub cards: u32,
@ -187,7 +188,7 @@ pub struct SanityCheckCounts {
    pub deck_config: u32,
}

#[derive(Serialize_tuple, Deserialize, Debug, Default)]
#[derive(Serialize_tuple, Deserialize, Debug, Default, PartialEq)]
pub struct SanityCheckDueCounts {
    pub new: u32,
    pub learn: u32,
@ -221,6 +222,7 @@ struct SyncState {
    host_number: u32,
}

#[derive(Debug)]
pub struct SyncOutput {
    pub required: SyncActionRequired,
    pub server_message: String,
@ -235,7 +237,7 @@ pub struct SyncAuth {

struct NormalSyncer<'a, F> {
    col: &'a mut Collection,
    remote: HTTPSyncClient,
    remote: Box<dyn SyncServer>,
    progress: NormalSyncProgress,
    progress_fn: F,
}
@ -292,14 +294,17 @@ impl<F> NormalSyncer<'_, F>
where
    F: FnMut(NormalSyncProgress, bool),
{
    /// Create a new syncing instance. If host_number is unavailable, use 0.
    pub fn new(col: &mut Collection, auth: SyncAuth, progress_fn: F) -> NormalSyncer<'_, F>
    pub fn new(
        col: &mut Collection,
        server: Box<dyn SyncServer>,
        progress_fn: F,
    ) -> NormalSyncer<'_, F>
    where
        F: FnMut(NormalSyncProgress, bool),
    {
        NormalSyncer {
            col,
            remote: HTTPSyncClient::new(Some(auth.hkey), auth.host_number),
            remote: server,
            progress: NormalSyncProgress::default(),
            progress_fn,
        }
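NormalSyncer no longer constructs its own HTTP client; any Box<dyn SyncServer> will do. A sketch of the two wirings this diff enables (progress is any FnMut(NormalSyncProgress, bool)):

    // against AnkiWeb
    let remote: Box<dyn SyncServer> =
        Box::new(HTTPSyncClient::new(Some(auth.hkey), auth.host_number));
    let out = NormalSyncer::new(&mut col, remote, progress).sync().await?;

    // against an in-process server, as the tests below do
    let local: Box<dyn SyncServer> = Box::new(LocalServer::new(server_col));
    let out = NormalSyncer::new(&mut col, local, progress).sync().await?;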
@ -347,6 +352,7 @@ where

    async fn get_sync_state(&self) -> Result<SyncState> {
        let remote: SyncMeta = self.remote.meta().await?;
        debug!(self.col.log, "remote {:?}", &remote);
        if !remote.should_continue {
            debug!(self.col.log, "server says abort"; "message"=>&remote.server_message);
            return Err(AnkiError::SyncError {
@ -356,6 +362,7 @@ where
        }

        let local = self.col.sync_meta()?;
        debug!(self.col.log, "local {:?}", &local);
        let delta = remote.current_time.0 - local.current_time.0;
        if delta.abs() > 300 {
            debug!(self.col.log, "clock off"; "delta"=>delta);
@ -400,11 +407,7 @@ where
    async fn start_and_process_deletions(&mut self, state: &SyncState) -> Result<()> {
        let remote: Graves = self
            .remote
            .start(
                state.usn_at_last_sync,
                self.col.get_local_mins_west(),
                state.local_is_newer,
            )
            .start(state.usn_at_last_sync, state.local_is_newer)
            .await?;

        debug!(self.col.log, "removed on remote";
@ -553,7 +556,7 @@ where
        }
    }

    async fn finalize(&self, state: &SyncState) -> Result<()> {
    async fn finalize(&mut self, state: &SyncState) -> Result<()> {
        let new_server_mtime = self.remote.finish().await?;
        self.col.finalize_sync(state, new_server_mtime)
    }
@ -595,7 +598,7 @@ pub async fn sync_login(username: &str, password: &str) -> Result<SyncAuth> {
}

pub async fn sync_abort(hkey: String, host_number: u32) -> Result<()> {
    let remote = HTTPSyncClient::new(Some(hkey), host_number);
    let mut remote = HTTPSyncClient::new(Some(hkey), host_number);
    remote.abort().await
}

@ -624,45 +627,52 @@ impl Collection {
        Ok(self.sync_meta()?.compared_to_remote(remote).required.into())
    }

    /// Create a new syncing instance. If host_number is unavailable, use 0.
    pub async fn normal_sync<F>(&mut self, auth: SyncAuth, progress_fn: F) -> Result<SyncOutput>
    where
        F: FnMut(NormalSyncProgress, bool),
    {
        NormalSyncer::new(self, auth, progress_fn).sync().await
        NormalSyncer::new(
            self,
            Box::new(HTTPSyncClient::new(Some(auth.hkey), auth.host_number)),
            progress_fn,
        )
        .sync()
        .await
    }

    /// Upload collection to AnkiWeb. Caller must re-open afterwards.
    pub async fn full_upload<F>(mut self, auth: SyncAuth, progress_fn: F) -> Result<()>
    where
        F: FnMut(FullSyncProgress, bool) + Send + Sync + 'static,
    {
    pub async fn full_upload(self, auth: SyncAuth, progress_fn: FullSyncProgressFn) -> Result<()> {
        let mut server = HTTPSyncClient::new(Some(auth.hkey), auth.host_number);
        server.set_full_sync_progress_fn(Some(progress_fn));
        self.full_upload_inner(Box::new(server)).await
        // remote.upload(&col_path, progress_fn).await?;
    }

    pub(crate) async fn full_upload_inner(mut self, server: Box<dyn SyncServer>) -> Result<()> {
        self.before_upload()?;
        let col_path = self.col_path.clone();
        self.close(true)?;
        let mut remote = HTTPSyncClient::new(Some(auth.hkey), auth.host_number);
        remote.upload(&col_path, progress_fn).await?;
        Ok(())
        server.full_upload(&col_path, false).await
    }

    /// Download collection from AnkiWeb. Caller must re-open afterwards.
    pub async fn full_download<F>(self, auth: SyncAuth, progress_fn: F) -> Result<()>
    where
        F: FnMut(FullSyncProgress, bool),
    {
    pub async fn full_download(
        self,
        auth: SyncAuth,
        progress_fn: FullSyncProgressFn,
    ) -> Result<()> {
        let mut server = HTTPSyncClient::new(Some(auth.hkey), auth.host_number);
        server.set_full_sync_progress_fn(Some(progress_fn));
        self.full_download_inner(Box::new(server)).await
    }
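The public full-sync entry points now take a pre-boxed FullSyncProgressFn instead of a generic closure, so the callback can be stored on the client via set_full_sync_progress_fn. A hypothetical call site (auth as returned by sync_login):

    let progress: FullSyncProgressFn = Box::new(|p, _throttle| {
        println!("{}/{} bytes", p.transferred_bytes, p.total_bytes);
    });
    col.full_download(auth, progress).await?;
    // per the doc comment above, the collection must be re-opened afterwards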
    pub(crate) async fn full_download_inner(self, server: Box<dyn SyncServer>) -> Result<()> {
        let col_path = self.col_path.clone();
        let folder = col_path.parent().unwrap();
        self.close(false)?;
        let remote = HTTPSyncClient::new(Some(auth.hkey), auth.host_number);
        let out_file = remote.download(folder, progress_fn).await?;
        let out_file = server.full_download().await?;
        // check file ok
        let db = rusqlite::Connection::open(out_file.path())?;
        let check_result: String = db.pragma_query_value(None, "integrity_check", |r| r.get(0))?;
        if check_result != "ok" {
            return Err(AnkiError::SyncError {
                info: "download corrupt".into(),
                kind: SyncErrorKind::Other,
            });
        }
        let db = open_and_check_sqlite_file(out_file.path())?;
        db.execute_batch("update col set ls=mod")?;
        drop(db);
        // overwrite existing collection atomically
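The hand-rolled integrity check is replaced by the shared open_and_check_sqlite_file helper, which the new server module reuses as well. Judging from the code it replaces, its shape is roughly this sketch (the helper's exact error type isn't shown in this diff):

    fn open_and_check_sqlite_file(path: &Path) -> Result<rusqlite::Connection> {
        let db = rusqlite::Connection::open(path)?;
        let check_result: String = db.pragma_query_value(None, "integrity_check", |r| r.get(0))?;
        if check_result != "ok" {
            // mirrors the removed inline error; the real helper may differ
            return Err(AnkiError::SyncError {
                info: "download corrupt".into(),
                kind: SyncErrorKind::Other,
            });
        }
        Ok(db)
    }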
@ -683,11 +693,11 @@ impl Collection {
            server_message: "".into(),
            should_continue: true,
            host_number: 0,
            empty: self.storage.have_at_least_one_card()?,
            empty: !self.storage.have_at_least_one_card()?,
        })
    }

    fn apply_graves(&self, graves: Graves, latest_usn: Usn) -> Result<()> {
    pub fn apply_graves(&self, graves: Graves, latest_usn: Usn) -> Result<()> {
        for nid in graves.notes {
            self.storage.remove_note(nid)?;
            self.storage.add_note_grave(nid, latest_usn)?;
@ -831,7 +841,7 @@ impl Collection {
        for nt in notetypes {
            let nt: NoteType = nt.into();
            let proceed = if let Some(existing_nt) = self.storage.get_notetype(nt.id)? {
                if existing_nt.mtime_secs < nt.mtime_secs {
                if existing_nt.mtime_secs <= nt.mtime_secs {
                    if (existing_nt.fields.len() != nt.fields.len())
                        || (existing_nt.templates.len() != nt.templates.len())
                    {
@ -858,7 +868,7 @@ impl Collection {
    fn merge_decks(&mut self, decks: Vec<DeckSchema11>) -> Result<()> {
        for deck in decks {
            let proceed = if let Some(existing_deck) = self.storage.get_deck(deck.id())? {
                existing_deck.mtime_secs < deck.common().mtime
                existing_deck.mtime_secs <= deck.common().mtime
            } else {
                true
            };
@ -874,7 +884,7 @@ impl Collection {
    fn merge_deck_config(&self, dconf: Vec<DeckConfSchema11>) -> Result<()> {
        for conf in dconf {
            let proceed = if let Some(existing_conf) = self.storage.get_deck_config(conf.id)? {
                existing_conf.mtime_secs < conf.mtime
                existing_conf.mtime_secs <= conf.mtime
            } else {
                true
            };
@ -896,6 +906,9 @@ impl Collection {
    // Remote->local chunks
    //----------------------------------------------------------------

    /// pending_usn is used to decide whether the local objects are newer.
    /// If the provided objects are not modified locally, the USN inside
    /// the individual objects is used.
    fn apply_chunk(&mut self, chunk: Chunk, pending_usn: Usn) -> Result<()> {
        self.merge_revlog(chunk.revlog)?;
        self.merge_cards(chunk.cards, pending_usn)?;
@ -1173,6 +1186,12 @@ impl From<SyncActionRequired> for sync_status_out::Required {

#[cfg(test)]
mod test {
    use std::path::Path;

    use async_trait::async_trait;
    use lazy_static::lazy_static;

    use super::server::LocalServer;
    use super::*;
    use crate::log;
    use crate::{
@ -1186,40 +1205,145 @@ mod test {

    fn full_progress(_: FullSyncProgress, _: bool) {}

    struct TestContext {
        dir: TempDir,
        auth: SyncAuth,
        col1: Option<Collection>,
        col2: Option<Collection>,
    #[test]
    /// Run remote tests if hkey provided in environment; otherwise local.
    fn syncing() -> Result<()> {
        let ctx: Box<dyn TestContext> = if let Ok(hkey) = std::env::var("TEST_HKEY") {
            Box::new(RemoteTestContext {
                auth: SyncAuth {
                    hkey,
                    host_number: 0,
                },
            })
        } else {
            Box::new(LocalTestContext {})
        };
        let mut rt = Runtime::new().unwrap();
        rt.block_on(upload_download(&ctx))?;
        rt.block_on(regular_sync(&ctx))
    }
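A note on invoking this test (commands are assumptions; the repo normally builds through Bazel):

    // cargo test syncing                     -> in-process LocalServer
    // TEST_HKEY=<hkey> cargo test syncing    -> same test against a live server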
    fn open_col(ctx: &TestContext, fname: &str) -> Result<Collection> {
        let path = ctx.dir.path().join(fname);
    fn open_col(dir: &Path, server: bool, fname: &str) -> Result<Collection> {
        let path = dir.join(fname);
        let i18n = I18n::new(&[""], "", log::terminal());
        open_collection(path, "".into(), "".into(), false, i18n, log::terminal())
        open_collection(path, "".into(), "".into(), server, i18n, log::terminal())
    }

    async fn upload_download(ctx: &mut TestContext) -> Result<()> {
        // add a card
        let mut col1 = open_col(ctx, "col1.anki2")?;
        let nt = col1.get_notetype_by_name("Basic")?.unwrap();
    #[async_trait(?Send)]
    trait TestContext {
        fn server(&self) -> Box<dyn SyncServer>;

        fn col1(&self) -> Collection {
            open_col(self.dir(), false, "col1.anki2").unwrap()
        }

        fn col2(&self) -> Collection {
            open_col(self.dir(), false, "col2.anki2").unwrap()
        }

        fn dir(&self) -> &Path {
            lazy_static! {
                static ref DIR: TempDir = tempdir().unwrap();
            }
            DIR.path()
        }

        async fn normal_sync(&self, col: &mut Collection) -> SyncOutput {
            NormalSyncer::new(col, self.server(), norm_progress)
                .sync()
                .await
                .unwrap()
        }

        async fn full_upload(&self, col: Collection) {
            col.full_upload_inner(self.server()).await.unwrap()
        }

        async fn full_download(&self, col: Collection) {
            col.full_download_inner(self.server()).await.unwrap()
        }
    }

    // Local specifics
    /////////////////////

    struct LocalTestContext {}

    #[async_trait(?Send)]
    impl TestContext for LocalTestContext {
        fn server(&self) -> Box<dyn SyncServer> {
            let col = open_col(self.dir(), true, "server.anki2").unwrap();
            Box::new(LocalServer::new(col))
        }
    }

    // Remote specifics
    /////////////////////

    struct RemoteTestContext {
        auth: SyncAuth,
    }

    impl RemoteTestContext {
        fn server_inner(&self) -> HTTPSyncClient {
            let auth = self.auth.clone();
            HTTPSyncClient::new(Some(auth.hkey), auth.host_number)
        }
    }

    #[async_trait(?Send)]
    impl TestContext for RemoteTestContext {
        fn server(&self) -> Box<dyn SyncServer> {
            Box::new(self.server_inner())
        }

        async fn full_upload(&self, col: Collection) {
            let mut server = self.server_inner();
            server.set_full_sync_progress_fn(Some(Box::new(full_progress)));
            col.full_upload_inner(Box::new(server)).await.unwrap()
        }

        async fn full_download(&self, col: Collection) {
            let mut server = self.server_inner();
            server.set_full_sync_progress_fn(Some(Box::new(full_progress)));
            col.full_download_inner(Box::new(server)).await.unwrap()
        }
    }

    // Setup + full syncs
    /////////////////////

    fn col1_setup(col: &mut Collection) {
        let nt = col.get_notetype_by_name("Basic").unwrap().unwrap();
        let mut note = nt.new_note();
        note.fields[0] = "1".into();
        col1.add_note(&mut note, DeckID(1))?;
        col.add_note(&mut note, DeckID(1)).unwrap();

        let out: SyncOutput = col1.normal_sync(ctx.auth.clone(), norm_progress).await?;
        // // set our schema time back, so when initial server
        // // col is created, it's not identical
        // col.storage
        //     .db
        //     .execute_batch("update col set scm = 123")
        //     .unwrap()
    }

    async fn upload_download(ctx: &Box<dyn TestContext>) -> Result<()> {
        let mut col1 = ctx.col1();
        col1_setup(&mut col1);

        let out = ctx.normal_sync(&mut col1).await;
        assert!(matches!(
            out.required,
            SyncActionRequired::FullSyncRequired { .. }
        ));

        col1.full_upload(ctx.auth.clone(), full_progress).await?;
        ctx.full_upload(col1).await;

        // another collection
        let mut col2 = open_col(ctx, "col2.anki2")?;
        let mut col2 = ctx.col2();

        // won't allow ankiweb clobber
        let out: SyncOutput = col2.normal_sync(ctx.auth.clone(), norm_progress).await?;
        let out = ctx.normal_sync(&mut col2).await;
        assert_eq!(
            out.required,
            SyncActionRequired::FullSyncRequired {
@ -1229,20 +1353,19 @@ mod test {
        );

        // fetch so we're in sync
        col2.full_download(ctx.auth.clone(), full_progress).await?;

        // reopen the two collections
        ctx.col1 = Some(open_col(ctx, "col1.anki2")?);
        ctx.col2 = Some(open_col(ctx, "col2.anki2")?);
        ctx.full_download(col2).await;

        Ok(())
    }

    async fn regular_sync(ctx: &mut TestContext) -> Result<()> {
        let col1 = ctx.col1.as_mut().unwrap();
        let col2 = ctx.col2.as_mut().unwrap();
    // Regular syncs
    /////////////////////

    async fn regular_sync(ctx: &Box<dyn TestContext>) -> Result<()> {
        // add a deck
        let mut col1 = ctx.col1();
        let mut col2 = ctx.col2();

        let mut deck = col1.get_or_create_normal_deck("new deck")?;

        // give it a new option group
@ -1280,15 +1403,15 @@ mod test {
        // col1.storage.set_creation_stamp(TimestampSecs(12345))?;

        // and sync our changes
        let remote = get_remote_sync_meta(ctx.auth.clone()).await?;
        let out = col1.get_sync_status(remote)?;
        let remote_meta = ctx.server().meta().await.unwrap();
        let out = col1.get_sync_status(remote_meta)?;
        assert_eq!(out, sync_status_out::Required::NormalSync);

        let out: SyncOutput = col1.normal_sync(ctx.auth.clone(), norm_progress).await?;
        let out = ctx.normal_sync(&mut col1).await;
        assert_eq!(out.required, SyncActionRequired::NoChanges);

        // sync the other collection
        let out: SyncOutput = col2.normal_sync(ctx.auth.clone(), norm_progress).await?;
        let out = ctx.normal_sync(&mut col2).await;
        assert_eq!(out.required, SyncActionRequired::NoChanges);

        let ntid = nt.id;
@ -1329,7 +1452,7 @@ mod test {
        );
        assert_eq!(
            col1.storage.creation_stamp()?,
            col1.storage.creation_stamp()?
            col2.storage.creation_stamp()?
        );

        // server doesn't send tag usns, so we can only compare tags, not usns,
@ -1351,7 +1474,7 @@ mod test {
        };

        // make sure everything has been transferred across
        compare_sides(col1, col2)?;
        compare_sides(&mut col1, &mut col2)?;

        // make some modifications
        let mut note = col2.storage.get_note(note.id)?.unwrap();
@ -1373,13 +1496,13 @@ mod test {
        col2.update_notetype(&mut nt, false)?;

        // sync the changes back
        let out: SyncOutput = col2.normal_sync(ctx.auth.clone(), norm_progress).await?;
        let out = ctx.normal_sync(&mut col2).await;
        assert_eq!(out.required, SyncActionRequired::NoChanges);
        let out: SyncOutput = col1.normal_sync(ctx.auth.clone(), norm_progress).await?;
        let out = ctx.normal_sync(&mut col1).await;
        assert_eq!(out.required, SyncActionRequired::NoChanges);

        // should still match
        compare_sides(col1, col2)?;
        compare_sides(&mut col1, &mut col2)?;

        // deletions should sync too
        for table in &["cards", "notes", "decks"] {
@ -1392,12 +1515,13 @@ mod test {

        // fixme: inconsistent usn arg
        col1.remove_cards_and_orphaned_notes(&[cardid])?;
        col1.remove_note_only(noteid, col1.usn()?)?;
        let usn = col1.usn()?;
        col1.remove_note_only(noteid, usn)?;
        col1.remove_deck_and_child_decks(deckid)?;

        let out: SyncOutput = col1.normal_sync(ctx.auth.clone(), norm_progress).await?;
        let out = ctx.normal_sync(&mut col1).await;
        assert_eq!(out.required, SyncActionRequired::NoChanges);
        let out: SyncOutput = col2.normal_sync(ctx.auth.clone(), norm_progress).await?;
        let out = ctx.normal_sync(&mut col2).await;
        assert_eq!(out.required, SyncActionRequired::NoChanges);

        for table in &["cards", "notes", "decks"] {
@ -1410,32 +1534,8 @@ mod test {

        // removing things like a notetype forces a full sync
        col2.remove_notetype(ntid)?;
        let out: SyncOutput = col2.normal_sync(ctx.auth.clone(), norm_progress).await?;
        let out = ctx.normal_sync(&mut col2).await;
        assert!(matches!(out.required, SyncActionRequired::FullSyncRequired { .. }));
        Ok(())
    }

    #[test]
    fn collection_sync() -> Result<()> {
        let hkey = match std::env::var("TEST_HKEY") {
            Ok(s) => s,
            Err(_) => {
                return Ok(());
            }
        };

        let mut ctx = TestContext {
            dir: tempdir()?,
            auth: SyncAuth {
                hkey,
                host_number: 0,
            },
            col1: None,
            col2: None,
        };

        let mut rt = Runtime::new().unwrap();
        rt.block_on(upload_download(&mut ctx))?;
        rt.block_on(regular_sync(&mut ctx))
    }
}
189
rslib/src/sync/server.rs
Normal file
@ -0,0 +1,189 @@
use std::{fs, path::Path};

use crate::{
    prelude::*,
    storage::open_and_check_sqlite_file,
    sync::{
        Chunk, Graves, SanityCheckCounts, SanityCheckOut, SanityCheckStatus, SyncMeta,
        UnchunkedChanges, Usn,
    },
};
use async_trait::async_trait;
use tempfile::NamedTempFile;

use super::ChunkableIDs;

#[async_trait(?Send)]
pub trait SyncServer {
    async fn meta(&self) -> Result<SyncMeta>;
    async fn start(&mut self, client_usn: Usn, local_is_newer: bool) -> Result<Graves>;
    async fn apply_graves(&mut self, client_chunk: Graves) -> Result<()>;
    async fn apply_changes(&mut self, client_changes: UnchunkedChanges)
        -> Result<UnchunkedChanges>;
    async fn chunk(&mut self) -> Result<Chunk>;
    async fn apply_chunk(&mut self, client_chunk: Chunk) -> Result<()>;
    async fn sanity_check(&mut self, client: SanityCheckCounts) -> Result<SanityCheckOut>;
    async fn finish(&mut self) -> Result<TimestampMillis>;
    async fn abort(&mut self) -> Result<()>;

    /// If `can_consume` is true, the local server will move or remove the file,
    /// instead of creating a copy. The remote server ignores this argument.
    async fn full_upload(self: Box<Self>, col_path: &Path, can_consume: bool) -> Result<()>;
    async fn full_download(self: Box<Self>) -> Result<NamedTempFile>;
}
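The trait captures one round of the stateful sync protocol. Based on the NormalSyncer code above, a server is driven roughly in this order:

    // 1. meta()          -> clock/usn/schema comparison, abort-message check
    // 2. start()         -> begin transaction, fetch server-side deletions (Graves)
    // 3. apply_graves()  <- push client deletions
    // 4. apply_changes() <- exchange small objects (notetypes, decks, config)
    // 5. chunk()         -> pull bulk cards/notes/revlog until exhausted
    // 6. apply_chunk()   <- push the client's bulk objects
    // 7. sanity_check()  <- compare counts on both sides
    // 8. finish()        -> commit and return the new server mtime
    //    (abort() rolls back instead; full_upload/full_download bypass all of this)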
pub struct LocalServer {
    col: Collection,

    // The current sync protocol is stateful, so unfortunately we need to
    // retain a bunch of information across requests. These are set either
    // on start, or on subsequent methods.
    server_usn: Usn,
    client_usn: Usn,
    /// Only used to determine whether we should send our
    /// config to client.
    client_is_newer: bool,
    /// Set on the first call to chunk()
    server_chunk_ids: Option<ChunkableIDs>,
}

impl LocalServer {
    #[allow(dead_code)]
    pub fn new(col: Collection) -> LocalServer {
        assert!(col.server);
        LocalServer {
            col,
            server_usn: Usn(0),
            client_usn: Usn(0),
            client_is_newer: false,
            server_chunk_ids: None,
        }
    }

    /// Consumes self and returns the stored collection. If a sync has begun, caller must ensure they
    /// call .finish() or .abort() before calling this.
    pub fn into_col(self) -> Collection {
        self.col
    }
}

#[async_trait(?Send)]
impl SyncServer for LocalServer {
    async fn meta(&self) -> Result<SyncMeta> {
        Ok(SyncMeta {
            modified: self.col.storage.get_modified_time()?,
            schema: self.col.storage.get_schema_mtime()?,
            usn: self.col.storage.usn(true)?,
            current_time: TimestampSecs::now(),
            server_message: String::new(),
            should_continue: true,
            host_number: 0,
            empty: !self.col.storage.have_at_least_one_card()?,
        })
    }

    async fn start(&mut self, client_usn: Usn, client_is_newer: bool) -> Result<Graves> {
        self.server_usn = self.col.usn()?;
        self.client_usn = client_usn;
        self.client_is_newer = client_is_newer;

        self.col.storage.begin_rust_trx()?;
        self.col.storage.pending_graves(client_usn)
    }

    async fn apply_graves(&mut self, client_chunk: Graves) -> Result<()> {
        self.col.apply_graves(client_chunk, self.server_usn)
    }

    async fn apply_changes(
        &mut self,
        client_changes: UnchunkedChanges,
    ) -> Result<UnchunkedChanges> {
        let server_changes =
            self.col
                .local_unchunked_changes(self.client_usn, None, !self.client_is_newer)?;
        self.col.apply_changes(client_changes, self.server_usn)?;
        Ok(server_changes)
    }

    async fn chunk(&mut self) -> Result<Chunk> {
        if self.server_chunk_ids.is_none() {
            self.server_chunk_ids = Some(self.col.get_chunkable_ids(self.client_usn)?);
        }

        self.col
            .get_chunk(self.server_chunk_ids.as_mut().unwrap(), None)
    }

    async fn apply_chunk(&mut self, client_chunk: Chunk) -> Result<()> {
        self.col.apply_chunk(client_chunk, self.client_usn)
    }

    async fn sanity_check(&mut self, mut client: SanityCheckCounts) -> Result<SanityCheckOut> {
        client.counts = Default::default();
        let server = self.col.storage.sanity_check_info()?;
        Ok(SanityCheckOut {
            status: if client == server {
                SanityCheckStatus::Ok
            } else {
                SanityCheckStatus::Bad
            },
            client: Some(client),
            server: Some(server),
        })
    }

    async fn finish(&mut self) -> Result<TimestampMillis> {
        let now = TimestampMillis::now();
        self.col.storage.set_modified_time(now)?;
        self.col.storage.set_last_sync(now)?;
        self.col.storage.increment_usn()?;
        self.col.storage.commit_rust_trx()?;
        Ok(now)
    }

    async fn abort(&mut self) -> Result<()> {
        self.col.storage.rollback_rust_trx()
    }

    /// `col_path` should point to the uploaded file, and the caller is
    /// responsible for imposing limits on its size if it wishes.
    /// If `can_consume` is true, the provided file will be moved into place,
    /// or removed on failure. If false, the original will be left alone.
    async fn full_upload(
        mut self: Box<Self>,
        mut col_path: &Path,
        can_consume: bool,
    ) -> Result<()> {
        // create a copy if necessary
        let new_file: NamedTempFile;
        if !can_consume {
            new_file = NamedTempFile::new()?;
            fs::copy(col_path, &new_file.path())?;
            col_path = new_file.path();
        }

        open_and_check_sqlite_file(col_path).map_err(|check_err| {
            match fs::remove_file(col_path) {
                Ok(_) => check_err,
                Err(remove_err) => remove_err.into(),
            }
        })?;

        let target_col_path = self.col.col_path.clone();
        self.col.close(false)?;
        fs::rename(col_path, &target_col_path).map_err(Into::into)
    }

    async fn full_download(mut self: Box<Self>) -> Result<NamedTempFile> {
        // bump usn/mod & close
        self.col.transact(None, |col| col.storage.increment_usn())?;
        let col_path = self.col.col_path.clone();
        self.col.close(true)?;

        // copy file and return path
        let temp_file = NamedTempFile::new()?;
        fs::copy(&col_path, temp_file.path())?;

        Ok(temp_file)
    }
}
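End to end, the in-process path looks roughly like this (open_col and norm_progress as defined in the mod.rs tests above; the server collection is opened with server = true, which LocalServer::new asserts):

    let server_col = open_col(dir, true, "server.anki2")?;
    let server = Box::new(LocalServer::new(server_col));
    let mut client_col = open_col(dir, false, "client.anki2")?;
    let out = NormalSyncer::new(&mut client_col, server, norm_progress)
        .sync()
        .await?;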
@ -27,6 +27,14 @@ impl TimestampSecs {
    pub(crate) fn date_string(self, offset: FixedOffset) -> String {
        offset.timestamp(self.0, 0).format("%Y-%m-%d").to_string()
    }

    pub fn local_utc_offset(self) -> FixedOffset {
        *Local.timestamp(self.0, 0).offset()
    }

    pub fn datetime(self, utc_offset: FixedOffset) -> DateTime<FixedOffset> {
        utc_offset.timestamp(self.0, 0)
    }
}
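The two new helpers split obtaining the local offset from materializing a datetime, so callers can cache or substitute the offset. A small usage sketch (chrono types, per the signatures above):

    let now = TimestampSecs::now();
    let offset = now.local_utc_offset();     // FixedOffset at that instant
    let dt = now.datetime(offset);           // DateTime<FixedOffset>
    println!("{}", dt.format("%Y-%m-%d %H:%M"));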
impl TimestampMillis {

2
run
@ -11,7 +11,7 @@ run_mac() {
# so we need to copy the files into a working folder before running on a Mac.
workspace=$(dirname $0)
bazel build $BUILDARGS //qt:runanki && \
rsync -aiL --exclude=anki/external --exclude=__pycache__ --delete --force-delete \
rsync -aiL --exclude=anki/external --delete -f'-p __pycache__' \
$workspace/bazel-bin/qt/runanki* $workspace/bazel-copy/ && \
$workspace/bazel-copy/runanki $*
}

@ -2,4 +2,4 @@

set -e

BUILDARGS="-c opt" $(dirname $0)/../run
BUILDARGS="-c opt --@io_bazel_rules_rust//worker:use_worker=False" $(dirname $0)/../run $*